2024-11-27 13:23:27,819 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-27 13:23:27,832 main DEBUG Took 0.010768 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-27 13:23:27,832 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-27 13:23:27,833 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-27 13:23:27,834 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-27 13:23:27,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,843 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-27 13:23:27,854 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,856 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,857 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,857 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,858 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,858 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,859 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,859 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,859 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,860 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-27 13:23:27,862 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,862 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,863 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,863 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,863 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,864 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,864 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,865 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-27 13:23:27,865 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,866 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-27 13:23:27,867 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-27 13:23:27,868 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-27 13:23:27,870 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-27 13:23:27,870 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-27 13:23:27,872 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-27 13:23:27,872 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-27 13:23:27,881 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-27 13:23:27,883 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-27 13:23:27,885 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-27 13:23:27,885 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-27 13:23:27,886 main DEBUG createAppenders(={Console}) 2024-11-27 13:23:27,886 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-27 13:23:27,887 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-27 13:23:27,887 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-27 13:23:27,887 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-27 13:23:27,888 main DEBUG OutputStream closed 2024-11-27 13:23:27,888 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-27 13:23:27,888 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-27 13:23:27,888 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-27 13:23:27,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-27 13:23:27,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-27 13:23:27,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-27 13:23:27,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-27 13:23:27,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-27 13:23:27,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-27 13:23:27,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-27 13:23:27,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-27 13:23:27,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-27 13:23:27,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-27 13:23:27,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-27 13:23:27,965 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-27 13:23:27,966 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-27 13:23:27,966 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-27 13:23:27,966 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-27 13:23:27,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-27 13:23:27,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-27 13:23:27,968 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-27 13:23:27,970 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-27 13:23:27,970 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-27 13:23:27,971 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-27 13:23:27,971 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-27T13:23:28,273 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a 2024-11-27 13:23:28,276 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-27 13:23:28,276 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-27T13:23:28,286 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-27T13:23:28,309 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-27T13:23:28,312 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b, deleteOnExit=true 2024-11-27T13:23:28,312 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-27T13:23:28,313 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/test.cache.data in system properties and HBase conf 2024-11-27T13:23:28,314 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T13:23:28,314 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.log.dir in system properties and HBase conf 2024-11-27T13:23:28,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T13:23:28,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T13:23:28,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-27T13:23:28,413 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-27T13:23:28,507 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-27T13:23:28,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T13:23:28,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T13:23:28,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T13:23:28,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T13:23:28,512 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T13:23:28,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T13:23:28,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T13:23:28,513 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T13:23:28,514 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T13:23:28,514 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/nfs.dump.dir in system properties and HBase conf 2024-11-27T13:23:28,514 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/java.io.tmpdir in system properties and HBase conf 2024-11-27T13:23:28,515 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T13:23:28,515 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T13:23:28,516 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T13:23:29,353 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-27T13:23:29,429 INFO [Time-limited test {}] log.Log(170): Logging initialized @2408ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-27T13:23:29,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T13:23:29,566 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T13:23:29,587 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T13:23:29,587 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T13:23:29,589 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T13:23:29,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T13:23:29,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.log.dir/,AVAILABLE} 2024-11-27T13:23:29,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T13:23:29,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/java.io.tmpdir/jetty-localhost-46363-hadoop-hdfs-3_4_1-tests_jar-_-any-16455727605037675678/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T13:23:29,831 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:46363} 2024-11-27T13:23:29,831 INFO [Time-limited test {}] server.Server(415): Started @2811ms 2024-11-27T13:23:30,239 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T13:23:30,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T13:23:30,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T13:23:30,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T13:23:30,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T13:23:30,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.log.dir/,AVAILABLE} 2024-11-27T13:23:30,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T13:23:30,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/java.io.tmpdir/jetty-localhost-36291-hadoop-hdfs-3_4_1-tests_jar-_-any-17252408047576783012/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T13:23:30,370 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:36291} 2024-11-27T13:23:30,370 INFO [Time-limited test {}] server.Server(415): Started @3350ms 2024-11-27T13:23:30,428 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T13:23:30,915 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data2/current/BP-117857532-172.17.0.2-1732713809119/current, will proceed with Du for space computation calculation, 2024-11-27T13:23:30,915 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data1/current/BP-117857532-172.17.0.2-1732713809119/current, will proceed with Du for space computation calculation, 2024-11-27T13:23:30,966 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-27T13:23:31,032 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4b5ef08d3eef518 with lease ID 0x9d60f0b7014144bc: Processing first storage report for DS-b417a0b5-a251-40fa-a468-526a7141fcf9 from datanode DatanodeRegistration(127.0.0.1:45727, datanodeUuid=8633b0dd-898d-41a0-8b1f-aeedc2e4d162, infoPort=46543, infoSecurePort=0, ipcPort=39801, storageInfo=lv=-57;cid=testClusterID;nsid=189841029;c=1732713809119) 2024-11-27T13:23:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4b5ef08d3eef518 with lease ID 0x9d60f0b7014144bc: from storage DS-b417a0b5-a251-40fa-a468-526a7141fcf9 node DatanodeRegistration(127.0.0.1:45727, datanodeUuid=8633b0dd-898d-41a0-8b1f-aeedc2e4d162, infoPort=46543, infoSecurePort=0, ipcPort=39801, storageInfo=lv=-57;cid=testClusterID;nsid=189841029;c=1732713809119), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-27T13:23:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4b5ef08d3eef518 with lease ID 0x9d60f0b7014144bc: Processing first storage report for DS-49e9e6dc-5c41-4d2d-a0ca-41d92a1425a8 from datanode DatanodeRegistration(127.0.0.1:45727, datanodeUuid=8633b0dd-898d-41a0-8b1f-aeedc2e4d162, infoPort=46543, infoSecurePort=0, ipcPort=39801, storageInfo=lv=-57;cid=testClusterID;nsid=189841029;c=1732713809119) 2024-11-27T13:23:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4b5ef08d3eef518 with lease ID 0x9d60f0b7014144bc: from storage DS-49e9e6dc-5c41-4d2d-a0ca-41d92a1425a8 node DatanodeRegistration(127.0.0.1:45727, datanodeUuid=8633b0dd-898d-41a0-8b1f-aeedc2e4d162, infoPort=46543, infoSecurePort=0, ipcPort=39801, storageInfo=lv=-57;cid=testClusterID;nsid=189841029;c=1732713809119), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T13:23:31,055 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a 
2024-11-27T13:23:31,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/zookeeper_0, clientPort=59011, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T13:23:31,151 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=59011 2024-11-27T13:23:31,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:31,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:31,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741825_1001 (size=7) 2024-11-27T13:23:31,833 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea with version=8 2024-11-27T13:23:31,833 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/hbase-staging 2024-11-27T13:23:31,976 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-27T13:23:32,248 INFO [Time-limited test {}] client.ConnectionUtils(129): master/a0541979a851:0 server-side Connection retries=45 2024-11-27T13:23:32,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,269 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T13:23:32,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T13:23:32,402 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T13:23:32,462 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-27T13:23:32,471 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-27T13:23:32,475 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T13:23:32,502 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 26450 (auto-detected) 2024-11-27T13:23:32,503 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-27T13:23:32,523 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34317 2024-11-27T13:23:32,534 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:32,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:32,555 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34317 connecting to ZooKeeper ensemble=127.0.0.1:59011 2024-11-27T13:23:32,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343170x0, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T13:23:32,590 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34317-0x100392645960000 connected 2024-11-27T13:23:32,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T13:23:32,623 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T13:23:32,626 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T13:23:32,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34317 2024-11-27T13:23:32,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34317 2024-11-27T13:23:32,633 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34317 2024-11-27T13:23:32,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34317 2024-11-27T13:23:32,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34317 
2024-11-27T13:23:32,644 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea, hbase.cluster.distributed=false 2024-11-27T13:23:32,706 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/a0541979a851:0 server-side Connection retries=45 2024-11-27T13:23:32,706 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T13:23:32,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T13:23:32,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T13:23:32,709 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T13:23:32,711 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T13:23:32,712 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:32819 2024-11-27T13:23:32,714 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-27T13:23:32,722 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-27T13:23:32,723 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:32,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:32,730 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:32819 connecting to ZooKeeper ensemble=127.0.0.1:59011 2024-11-27T13:23:32,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328190x0, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T13:23:32,735 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32819-0x100392645960001 connected 2024-11-27T13:23:32,735 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T13:23:32,737 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32819-0x100392645960001, 
quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T13:23:32,738 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T13:23:32,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32819 2024-11-27T13:23:32,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32819 2024-11-27T13:23:32,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32819 2024-11-27T13:23:32,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32819 2024-11-27T13:23:32,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32819 2024-11-27T13:23:32,750 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/a0541979a851,34317,1732713811966 2024-11-27T13:23:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T13:23:32,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T13:23:32,760 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a0541979a851,34317,1732713811966 2024-11-27T13:23:32,771 DEBUG [M:0;a0541979a851:34317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a0541979a851:34317 2024-11-27T13:23:32,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T13:23:32,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:32,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T13:23:32,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:32,785 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T13:23:32,785 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T13:23:32,786 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a0541979a851,34317,1732713811966 from backup master directory 2024-11-27T13:23:32,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a0541979a851,34317,1732713811966 2024-11-27T13:23:32,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T13:23:32,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T13:23:32,790 WARN [master/a0541979a851:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T13:23:32,790 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a0541979a851,34317,1732713811966 2024-11-27T13:23:32,793 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-27T13:23:32,795 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-27T13:23:32,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741826_1002 (size=42) 2024-11-27T13:23:33,267 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/hbase.id with ID: e2f87c7f-99d7-4c32-95d4-d56f61e27a72 2024-11-27T13:23:33,310 INFO [master/a0541979a851:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T13:23:33,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:33,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:33,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741827_1003 (size=196) 2024-11-27T13:23:33,771 INFO [master/a0541979a851:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:23:33,773 INFO [master/a0541979a851:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-27T13:23:33,792 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:33,796 INFO [master/a0541979a851:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-27T13:23:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741828_1004 (size=1189) 2024-11-27T13:23:34,251 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store 2024-11-27T13:23:34,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741829_1005 (size=34) 2024-11-27T13:23:34,273 INFO [master/a0541979a851:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-27T13:23:34,274 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:34,275 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T13:23:34,275 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:23:34,275 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:23:34,275 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T13:23:34,276 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:23:34,276 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:23:34,276 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-27T13:23:34,278 WARN [master/a0541979a851:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/.initializing 2024-11-27T13:23:34,278 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/WALs/a0541979a851,34317,1732713811966 2024-11-27T13:23:34,285 INFO [master/a0541979a851:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-27T13:23:34,296 INFO [master/a0541979a851:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a0541979a851%2C34317%2C1732713811966, suffix=, logDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/WALs/a0541979a851,34317,1732713811966, archiveDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/oldWALs, maxLogs=10 2024-11-27T13:23:34,319 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/WALs/a0541979a851,34317,1732713811966/a0541979a851%2C34317%2C1732713811966.1732713814301, exclude list is [], retry=0 2024-11-27T13:23:34,337 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45727,DS-b417a0b5-a251-40fa-a468-526a7141fcf9,DISK] 2024-11-27T13:23:34,340 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-27T13:23:34,378 INFO [master/a0541979a851:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/WALs/a0541979a851,34317,1732713811966/a0541979a851%2C34317%2C1732713811966.1732713814301 2024-11-27T13:23:34,379 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46543:46543)] 2024-11-27T13:23:34,379 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:23:34,380 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:34,383 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,384 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,423 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-27T13:23:34,457 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:34,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:34,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-27T13:23:34,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:34,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:34,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-27T13:23:34,470 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:34,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:34,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-27T13:23:34,475 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:34,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:34,479 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,481 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,489 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-27T13:23:34,493 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T13:23:34,498 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:23:34,499 INFO [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72284662, jitterRate=0.07712540030479431}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-27T13:23:34,502 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-27T13:23:34,503 INFO [master/a0541979a851:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-27T13:23:34,531 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e262798, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:34,565 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
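The FlushLargeStoresPolicy message above says no explicit per-family flush lower bound was configured, so HBase falls back to the region memstore flush size divided by the number of families (128 MB across the four master:store families gives the 32.0 M it reports). A hedged sketch of setting that bound explicitly, using the key named in the log message (whether newer branches also expect a ".min" variant is not shown here, so treat the key as an assumption); the table name "example" is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundConfig {
      public static void main(String[] args) {
        // cluster-wide: flush an individual column family once it holds 16 MB
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

        // per table, through descriptor metadata (hypothetical table "example")
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }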
2024-11-27T13:23:34,577 INFO [master/a0541979a851:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-27T13:23:34,577 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-27T13:23:34,579 INFO [master/a0541979a851:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-27T13:23:34,581 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-27T13:23:34,585 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-27T13:23:34,586 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-27T13:23:34,611 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-27T13:23:34,622 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-27T13:23:34,624 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-27T13:23:34,626 INFO [master/a0541979a851:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-27T13:23:34,627 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-27T13:23:34,629 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-27T13:23:34,631 INFO [master/a0541979a851:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-27T13:23:34,635 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-27T13:23:34,637 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-27T13:23:34,638 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-27T13:23:34,639 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-27T13:23:34,651 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34317-0x100392645960000, quorum=127.0.0.1:59011, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-27T13:23:34,652 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-27T13:23:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-27T13:23:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-27T13:23:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,657 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=a0541979a851,34317,1732713811966, sessionid=0x100392645960000, setting cluster-up flag (Was=false) 2024-11-27T13:23:34,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,676 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-27T13:23:34,678 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a0541979a851,34317,1732713811966 2024-11-27T13:23:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:34,689 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-27T13:23:34,690 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a0541979a851,34317,1732713811966 2024-11-27T13:23:34,768 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a0541979a851:32819 2024-11-27T13:23:34,769 INFO 
[RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1008): ClusterId : e2f87c7f-99d7-4c32-95d4-d56f61e27a72 2024-11-27T13:23:34,773 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-27T13:23:34,778 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-27T13:23:34,778 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-27T13:23:34,781 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-27T13:23:34,782 DEBUG [RS:0;a0541979a851:32819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d155966, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:34,782 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-27T13:23:34,784 DEBUG [RS:0;a0541979a851:32819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a5d58dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a0541979a851/172.17.0.2:0 2024-11-27T13:23:34,787 INFO [RS:0;a0541979a851:32819 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-27T13:23:34,787 INFO [RS:0;a0541979a851:32819 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-27T13:23:34,787 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-27T13:23:34,788 INFO [master/a0541979a851:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-27T13:23:34,790 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(3073): reportForDuty to master=a0541979a851,34317,1732713811966 with isa=a0541979a851/172.17.0.2:32819, startcode=1732713812705 2024-11-27T13:23:34,791 INFO [master/a0541979a851:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
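The reportForDuty call above is the region server announcing itself to the master at a0541979a851:34317; the registration is confirmed a few entries later. Once that handshake completes, cluster membership is visible through the public Admin API. A hedged client-side sketch, assuming the ZooKeeper quorum and client port shown elsewhere in this log:

    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // values taken from this run
        conf.set("hbase.zookeeper.property.clientPort", "59011");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          for (Map.Entry<ServerName, ServerMetrics> e
              : metrics.getLiveServerMetrics().entrySet()) {
            // expects one entry here: a0541979a851,32819,1732713812705
            System.out.println(e.getKey() + " regions=" + e.getValue().getRegionMetrics().size());
          }
        }
      }
    }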
2024-11-27T13:23:34,799 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a0541979a851,34317,1732713811966 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-27T13:23:34,803 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a0541979a851:0, corePoolSize=5, maxPoolSize=5 2024-11-27T13:23:34,803 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a0541979a851:0, corePoolSize=5, maxPoolSize=5 2024-11-27T13:23:34,804 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a0541979a851:0, corePoolSize=5, maxPoolSize=5 2024-11-27T13:23:34,804 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a0541979a851:0, corePoolSize=5, maxPoolSize=5 2024-11-27T13:23:34,804 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a0541979a851:0, corePoolSize=10, maxPoolSize=10 2024-11-27T13:23:34,804 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,805 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a0541979a851:0, corePoolSize=2, maxPoolSize=2 2024-11-27T13:23:34,805 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,806 DEBUG [RS:0;a0541979a851:32819 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T13:23:34,808 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732713844808 2024-11-27T13:23:34,810 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-27T13:23:34,810 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-27T13:23:34,811 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-27T13:23:34,811 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-27T13:23:34,815 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:34,815 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-27T13:23:34,815 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T13:23:34,816 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-27T13:23:34,816 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-27T13:23:34,816 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-27T13:23:34,817 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
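The FSTableDescriptors entry above dumps the hbase:meta descriptor: an IS_META table carrying the MultiRowMutationEndpoint coprocessor and three families (info, rep_barrier, table), each with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and small block sizes. For orientation only, here is a hedged sketch of how an equivalent descriptor would be written with the public builder API; this is not how meta itself is created (the master does that internally via InitMetaProcedure), and the table name used is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      // one family mirroring the settings logged for hbase:meta
      static ColumnFamilyDescriptor family(String name, int blocksize, int maxVersions) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(blocksize)
            .setMaxVersions(maxVersions)
            .build();
      }

      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_meta_like"))   // NOT hbase:meta itself
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setColumnFamily(family("info", 8 * 1024, 3))
            .setColumnFamily(family("rep_barrier", 64 * 1024, Integer.MAX_VALUE))
            .setColumnFamily(family("table", 8 * 1024, 3))
            .build();
        System.out.println(td);
      }
    }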
2024-11-27T13:23:34,824 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-27T13:23:34,825 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-27T13:23:34,826 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-27T13:23:34,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741831_1007 (size=1039) 2024-11-27T13:23:34,830 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-27T13:23:34,831 INFO [master/a0541979a851:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-27T13:23:34,832 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a0541979a851:0:becomeActiveMaster-HFileCleaner.large.0-1732713814832,5,FailOnTimeoutGroup] 2024-11-27T13:23:34,833 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a0541979a851:0:becomeActiveMaster-HFileCleaner.small.0-1732713814833,5,FailOnTimeoutGroup] 2024-11-27T13:23:34,833 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,833 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-27T13:23:34,834 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,835 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
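The HMaster(1680) line above notes that reopening regions with a very high store file reference count is disabled and names the switch, hbase.regions.recovery.store.file.ref.count. A hedged sketch of turning it on; the key is taken verbatim from the log message and the threshold value is an arbitrary example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountRecoveryConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // any value > 0 enables the chore that reopens regions whose store files
        // appear to have leaked references; 256 is only an illustrative threshold
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
        System.out.println(conf.get("hbase.regions.recovery.store.file.ref.count"));
      }
    }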
2024-11-27T13:23:34,854 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41075, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T13:23:34,861 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34317 {}] master.ServerManager(332): Checking decommissioned status of RegionServer a0541979a851,32819,1732713812705 2024-11-27T13:23:34,863 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34317 {}] master.ServerManager(486): Registering regionserver=a0541979a851,32819,1732713812705 2024-11-27T13:23:34,878 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:23:34,878 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42217 2024-11-27T13:23:34,878 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-27T13:23:34,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T13:23:34,883 DEBUG [RS:0;a0541979a851:32819 {}] zookeeper.ZKUtil(111): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a0541979a851,32819,1732713812705 2024-11-27T13:23:34,883 WARN [RS:0;a0541979a851:32819 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T13:23:34,884 INFO [RS:0;a0541979a851:32819 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-27T13:23:34,884 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705 2024-11-27T13:23:34,885 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a0541979a851,32819,1732713812705] 2024-11-27T13:23:34,899 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-27T13:23:34,914 INFO [RS:0;a0541979a851:32819 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-27T13:23:34,932 INFO [RS:0;a0541979a851:32819 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-27T13:23:34,935 INFO [RS:0;a0541979a851:32819 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-27T13:23:34,935 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
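The WALFactory(183) entry above shows the region server instantiating AsyncFSWALProvider, the fan-out async WAL whose output-stream setup appears earlier in this log. A hedged sketch of pinning the provider explicitly; hbase.wal.provider is the usual key and "asyncfs" / "filesystem" the usual values, but confirm them against the running version before relying on this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");        // AsyncFSWALProvider, as logged above
        // conf.set("hbase.wal.provider", "filesystem");  // classic FSHLog-based provider
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }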
2024-11-27T13:23:34,936 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-27T13:23:34,945 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,945 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a0541979a851:0, corePoolSize=2, maxPoolSize=2 2024-11-27T13:23:34,946 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,947 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,947 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,947 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,947 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a0541979a851:0, corePoolSize=1, maxPoolSize=1 2024-11-27T13:23:34,947 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a0541979a851:0, corePoolSize=3, maxPoolSize=3 2024-11-27T13:23:34,948 DEBUG [RS:0;a0541979a851:32819 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0, corePoolSize=3, maxPoolSize=3 2024-11-27T13:23:34,949 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,949 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,949 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
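A few entries earlier the PressureAwareCompactionThroughputController reports a 50 to 100 MB/s compaction throughput window with a 60 s tuning period. A hedged sketch of the corresponding settings; the key names below are my assumption of the usual throughput-controller bounds and the values are bytes per second:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // assumed keys for the PressureAwareCompactionThroughputController bounds
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }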
2024-11-27T13:23:34,949 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:34,949 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,32819,1732713812705-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T13:23:34,978 INFO [RS:0;a0541979a851:32819 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-27T13:23:34,980 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,32819,1732713812705-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:35,001 INFO [RS:0;a0541979a851:32819 {}] regionserver.Replication(204): a0541979a851,32819,1732713812705 started 2024-11-27T13:23:35,001 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1767): Serving as a0541979a851,32819,1732713812705, RpcServer on a0541979a851/172.17.0.2:32819, sessionid=0x100392645960001 2024-11-27T13:23:35,002 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-27T13:23:35,002 DEBUG [RS:0;a0541979a851:32819 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a0541979a851,32819,1732713812705 2024-11-27T13:23:35,002 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a0541979a851,32819,1732713812705' 2024-11-27T13:23:35,002 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-27T13:23:35,003 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a0541979a851,32819,1732713812705 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a0541979a851,32819,1732713812705' 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-27T13:23:35,004 DEBUG [RS:0;a0541979a851:32819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-27T13:23:35,005 DEBUG [RS:0;a0541979a851:32819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-27T13:23:35,005 INFO [RS:0;a0541979a851:32819 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-27T13:23:35,005 INFO [RS:0;a0541979a851:32819 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
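The last two entries above show both RPC quotas and space quotas disabled, which is the default. A hedged sketch of switching quota support on; hbase.quota.enabled is the documented key and must be set before the master and region servers start:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // enables the RegionServerRpcQuotaManager and space quota manager mentioned above
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }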
2024-11-27T13:23:35,111 INFO [RS:0;a0541979a851:32819 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-27T13:23:35,114 INFO [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a0541979a851%2C32819%2C1732713812705, suffix=, logDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705, archiveDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/oldWALs, maxLogs=32 2024-11-27T13:23:35,132 DEBUG [RS:0;a0541979a851:32819 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705/a0541979a851%2C32819%2C1732713812705.1732713815117, exclude list is [], retry=0 2024-11-27T13:23:35,136 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45727,DS-b417a0b5-a251-40fa-a468-526a7141fcf9,DISK] 2024-11-27T13:23:35,140 INFO [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705/a0541979a851%2C32819%2C1732713812705.1732713815117 2024-11-27T13:23:35,141 DEBUG [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46543:46543)] 2024-11-27T13:23:35,232 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-27T13:23:35,232 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:23:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741833_1009 (size=32) 2024-11-27T13:23:35,645 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:35,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T13:23:35,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T13:23:35,650 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:35,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:35,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T13:23:35,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T13:23:35,654 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:35,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:35,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T13:23:35,657 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T13:23:35,657 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:35,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:35,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740 2024-11-27T13:23:35,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740 2024-11-27T13:23:35,663 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
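The PEWorker entries above show the master materialising the hbase:meta region (families info, rep_barrier and table) on the filesystem before handing it to the assignment procedure that follows. Once meta is open it can be read like any other table; a hedged client-side sketch, with connection settings assumed as in the earlier example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner =
                 meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow()));   // one row per region
          }
        }
      }
    }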
2024-11-27T13:23:35,666 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-27T13:23:35,670 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:23:35,670 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72954234, jitterRate=0.08710280060768127}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:23:35,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-27T13:23:35,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-27T13:23:35,673 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-27T13:23:35,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-27T13:23:35,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T13:23:35,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T13:23:35,674 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-27T13:23:35,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-27T13:23:35,677 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-27T13:23:35,677 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-27T13:23:35,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-27T13:23:35,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T13:23:35,694 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-27T13:23:35,846 DEBUG [a0541979a851:34317 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-27T13:23:35,851 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:35,857 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a0541979a851,32819,1732713812705, state=OPENING 2024-11-27T13:23:35,863 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-27T13:23:35,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:35,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:35,866 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T13:23:35,866 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T13:23:35,867 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:23:36,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:36,043 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-27T13:23:36,046 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-27T13:23:36,057 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-27T13:23:36,057 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-27T13:23:36,057 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-27T13:23:36,061 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a0541979a851%2C32819%2C1732713812705.meta, suffix=.meta, logDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705, archiveDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/oldWALs, maxLogs=32 2024-11-27T13:23:36,077 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705/a0541979a851%2C32819%2C1732713812705.meta.1732713816062.meta, exclude list is [], retry=0 2024-11-27T13:23:36,081 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45727,DS-b417a0b5-a251-40fa-a468-526a7141fcf9,DISK] 2024-11-27T13:23:36,084 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/WALs/a0541979a851,32819,1732713812705/a0541979a851%2C32819%2C1732713812705.meta.1732713816062.meta 2024-11-27T13:23:36,085 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:46543:46543)] 2024-11-27T13:23:36,085 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:23:36,087 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-27T13:23:36,149 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-27T13:23:36,154 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-27T13:23:36,160 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-27T13:23:36,161 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:36,161 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-27T13:23:36,161 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-27T13:23:36,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T13:23:36,167 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T13:23:36,167 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:36,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:36,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T13:23:36,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T13:23:36,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:36,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:36,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T13:23:36,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T13:23:36,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:36,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T13:23:36,176 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740 2024-11-27T13:23:36,179 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740 2024-11-27T13:23:36,182 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:23:36,185 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-27T13:23:36,187 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66274110, jitterRate=-0.012438803911209106}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:23:36,189 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-27T13:23:36,198 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732713816036 2024-11-27T13:23:36,209 DEBUG [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-27T13:23:36,210 INFO [RS_OPEN_META-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-27T13:23:36,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:36,213 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a0541979a851,32819,1732713812705, state=OPEN 2024-11-27T13:23:36,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T13:23:36,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T13:23:36,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T13:23:36,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T13:23:36,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-27T13:23:36,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=a0541979a851,32819,1732713812705 in 351 msec 2024-11-27T13:23:36,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-27T13:23:36,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 540 msec 2024-11-27T13:23:36,233 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5060 sec 2024-11-27T13:23:36,233 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732713816233, completionTime=-1 2024-11-27T13:23:36,233 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-27T13:23:36,234 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-27T13:23:36,273 DEBUG [hconnection-0x10bb86e4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:36,275 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:36,287 INFO [master/a0541979a851:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-27T13:23:36,288 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732713876288 2024-11-27T13:23:36,288 INFO [master/a0541979a851:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732713936288 2024-11-27T13:23:36,288 INFO [master/a0541979a851:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 54 msec 2024-11-27T13:23:36,310 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:36,310 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:36,310 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:36,312 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a0541979a851:34317, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:36,312 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:36,318 DEBUG [master/a0541979a851:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-27T13:23:36,320 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-27T13:23:36,322 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T13:23:36,328 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-27T13:23:36,331 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:23:36,332 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:36,334 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:23:36,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741835_1011 (size=358) 2024-11-27T13:23:36,349 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3966f1275e7e6d5ced325aca1684d4b9, NAME => 'hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:23:36,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741836_1012 (size=42) 2024-11-27T13:23:36,760 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:36,760 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3966f1275e7e6d5ced325aca1684d4b9, disabling compactions & flushes 2024-11-27T13:23:36,760 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:36,760 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:36,760 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 
after waiting 0 ms 2024-11-27T13:23:36,760 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:36,760 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:36,761 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3966f1275e7e6d5ced325aca1684d4b9: 2024-11-27T13:23:36,763 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:23:36,770 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732713816764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713816764"}]},"ts":"1732713816764"} 2024-11-27T13:23:36,793 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:23:36,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:23:36,798 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713816795"}]},"ts":"1732713816795"} 2024-11-27T13:23:36,802 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-27T13:23:36,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3966f1275e7e6d5ced325aca1684d4b9, ASSIGN}] 2024-11-27T13:23:36,811 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3966f1275e7e6d5ced325aca1684d4b9, ASSIGN 2024-11-27T13:23:36,813 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3966f1275e7e6d5ced325aca1684d4b9, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:23:36,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3966f1275e7e6d5ced325aca1684d4b9, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:36,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3966f1275e7e6d5ced325aca1684d4b9, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:23:37,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:37,128 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:37,128 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3966f1275e7e6d5ced325aca1684d4b9, NAME => 'hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:23:37,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:37,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,129 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,132 INFO [StoreOpener-3966f1275e7e6d5ced325aca1684d4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,134 INFO [StoreOpener-3966f1275e7e6d5ced325aca1684d4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3966f1275e7e6d5ced325aca1684d4b9 columnFamilyName info 2024-11-27T13:23:37,135 DEBUG [StoreOpener-3966f1275e7e6d5ced325aca1684d4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:37,135 INFO [StoreOpener-3966f1275e7e6d5ced325aca1684d4b9-1 {}] regionserver.HStore(327): Store=3966f1275e7e6d5ced325aca1684d4b9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:37,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,137 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,141 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:23:37,145 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:23:37,145 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3966f1275e7e6d5ced325aca1684d4b9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68229764, jitterRate=0.016702711582183838}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T13:23:37,147 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3966f1275e7e6d5ced325aca1684d4b9: 2024-11-27T13:23:37,149 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9., pid=6, masterSystemTime=1732713817122 2024-11-27T13:23:37,154 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3966f1275e7e6d5ced325aca1684d4b9, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:37,156 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:23:37,156 INFO [RS_OPEN_PRIORITY_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 
2024-11-27T13:23:37,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-27T13:23:37,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3966f1275e7e6d5ced325aca1684d4b9, server=a0541979a851,32819,1732713812705 in 190 msec 2024-11-27T13:23:37,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-27T13:23:37,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3966f1275e7e6d5ced325aca1684d4b9, ASSIGN in 353 msec 2024-11-27T13:23:37,168 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:23:37,169 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713817169"}]},"ts":"1732713817169"} 2024-11-27T13:23:37,172 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-27T13:23:37,177 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:23:37,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 855 msec 2024-11-27T13:23:37,232 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-27T13:23:37,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-27T13:23:37,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:37,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:23:37,265 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-27T13:23:37,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-27T13:23:37,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-27T13:23:37,289 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, 
namespace=hbase 2024-11-27T13:23:37,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-27T13:23:37,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-27T13:23:37,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-27T13:23:37,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-27T13:23:37,318 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.528sec 2024-11-27T13:23:37,319 INFO [master/a0541979a851:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T13:23:37,321 INFO [master/a0541979a851:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T13:23:37,322 INFO [master/a0541979a851:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T13:23:37,322 INFO [master/a0541979a851:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T13:23:37,323 INFO [master/a0541979a851:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T13:23:37,324 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T13:23:37,324 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T13:23:37,330 DEBUG [master/a0541979a851:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-27T13:23:37,331 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T13:23:37,332 INFO [master/a0541979a851:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a0541979a851,34317,1732713811966-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T13:23:37,369 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-27T13:23:37,369 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-27T13:23:37,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:37,381 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-27T13:23:37,381 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-27T13:23:37,392 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:37,401 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44566, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:37,412 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=a0541979a851,34317,1732713811966 2024-11-27T13:23:37,430 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=385, ProcessCount=11, AvailableMemoryMB=4858 2024-11-27T13:23:37,464 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:23:37,471 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:23:37,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-27T13:23:37,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:23:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:23:37,486 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:23:37,487 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:37,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-27T13:23:37,489 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:23:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:37,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741837_1013 (size=963) 2024-11-27T13:23:37,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:37,915 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:23:37,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741838_1014 (size=53) 2024-11-27T13:23:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2b5b15f41df6d1ae2583263f41ba6257, disabling compactions & flushes 2024-11-27T13:23:38,327 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. after waiting 0 ms 2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:38,327 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:38,327 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:38,329 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:23:38,330 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713818329"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713818329"}]},"ts":"1732713818329"} 2024-11-27T13:23:38,333 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:23:38,335 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:23:38,335 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713818335"}]},"ts":"1732713818335"} 2024-11-27T13:23:38,337 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:23:38,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, ASSIGN}] 2024-11-27T13:23:38,343 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, ASSIGN 2024-11-27T13:23:38,345 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:23:38,496 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=2b5b15f41df6d1ae2583263f41ba6257, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:38,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:23:38,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:38,653 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:38,659 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:38,660 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:23:38,660 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,660 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:23:38,660 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,660 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,663 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,666 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:23:38,666 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b5b15f41df6d1ae2583263f41ba6257 columnFamilyName A 2024-11-27T13:23:38,666 DEBUG [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:38,667 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(327): Store=2b5b15f41df6d1ae2583263f41ba6257/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:38,668 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,670 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:23:38,670 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b5b15f41df6d1ae2583263f41ba6257 columnFamilyName B 2024-11-27T13:23:38,670 DEBUG [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:38,671 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(327): Store=2b5b15f41df6d1ae2583263f41ba6257/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:38,672 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,674 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:23:38,674 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b5b15f41df6d1ae2583263f41ba6257 columnFamilyName C 2024-11-27T13:23:38,674 DEBUG [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:23:38,675 INFO [StoreOpener-2b5b15f41df6d1ae2583263f41ba6257-1 {}] regionserver.HStore(327): Store=2b5b15f41df6d1ae2583263f41ba6257/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:23:38,675 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:38,677 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,677 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,680 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:23:38,682 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:38,685 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:23:38,686 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 2b5b15f41df6d1ae2583263f41ba6257; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73452858, jitterRate=0.09453287720680237}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:23:38,687 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:38,688 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., pid=11, masterSystemTime=1732713818653 2024-11-27T13:23:38,691 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:38,691 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:38,692 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=2b5b15f41df6d1ae2583263f41ba6257, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:23:38,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-27T13:23:38,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 in 196 msec 2024-11-27T13:23:38,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-27T13:23:38,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, ASSIGN in 356 msec 2024-11-27T13:23:38,702 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:23:38,702 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713818702"}]},"ts":"1732713818702"} 2024-11-27T13:23:38,705 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:23:38,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:23:38,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2260 sec 2024-11-27T13:23:39,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-27T13:23:39,618 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-27T13:23:39,623 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-27T13:23:39,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,632 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,635 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:23:39,637 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44286, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:23:39,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-27T13:23:39,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,650 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-27T13:23:39,653 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,654 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-27T13:23:39,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-27T13:23:39,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,663 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-27T13:23:39,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-27T13:23:39,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,674 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-27T13:23:39,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,679 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-27T13:23:39,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,684 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-27T13:23:39,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:23:39,690 DEBUG [hconnection-0x3b20eaa4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,690 DEBUG [hconnection-0x45f0f0a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,691 DEBUG [hconnection-0x778c0741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,693 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,695 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,700 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:39,701 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 
2024-11-27T13:23:39,708 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:39,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:39,710 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:39,712 DEBUG [hconnection-0x4b800cbf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:39,714 DEBUG [hconnection-0x4d811003-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,716 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,717 DEBUG [hconnection-0x74328c80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,721 DEBUG [hconnection-0xb857c1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,723 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,724 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,724 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44620, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,730 DEBUG [hconnection-0x14eb5fc7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:23:39,734 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,737 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44622, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,742 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44650, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:23:39,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:39,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:39,825 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:23:39,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:39,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:39,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:39,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:39,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:39,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:39,874 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:39,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:39,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:39,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:39,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
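Editorial note: the MemStoreFlusher entries above show all three stores (A, B, C) of region 2b5b15f41df6d1ae2583263f41ba6257 being snapshotted and written out, and the stores are CompactingMemStore instances, which suggests the table runs with in-memory compaction enabled. The FlushRegionCallable dispatched for pid=13 is meanwhile rejected with "NOT flushing ... as already flushing", so the master keeps resubmitting it (the later pid=13 retries below). A hedged sketch of creating a table whose families use a compacting memstore is shown next; the family names match the test, but the policy and everything else are illustrative assumptions, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateCompactingMemstoreTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // Any non-NONE policy backs the family with a CompactingMemStore,
                // which is what the "FLUSHING TO DISK ... store=A/B/C" lines reflect.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}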
2024-11-27T13:23:39,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:39,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7ca45dda8c3e4c0db16a56b501f817ca is 50, key is test_row_0/A:col10/1732713819781/Put/seqid=0 2024-11-27T13:23:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713879959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713879967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713879991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713879992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713879993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:40,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741839_1015 (size=12001) 2024-11-27T13:23:40,076 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:40,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713880154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713880153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713880155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713880156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713880160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:40,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
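Editorial note: the RegionTooBusyException warnings above come from HRegion.checkResources rejecting mutations while the region's memstore is over its blocking limit ("Over memstore limit=512.0 K"); the same client connections reappear with higher callIds and later deadlines as the writes are retried. The stock client already retries this exception internally with backoff, so a caller usually sees it only after retries are exhausted, wrapped in the thrown IOException's cause chain. A minimal, hypothetical sketch of backing off on such failures is below; the row, qualifier, value and retry numbers are illustrative assumptions, not the test's code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionWriter {

  // Walks the cause chain to see whether a failed put was ultimately rejected
  // by the region server with a RegionTooBusyException (memstore pressure).
  private static boolean causedByBusyRegion(Throwable t) {
    for (Throwable c = t; c != null; c = c.getCause()) {
      if (c instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));

      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // the client retries internally before giving up
          return;
        } catch (IOException e) {
          if (!causedByBusyRegion(e) || attempt >= 5) {
            throw e;                    // not memstore pressure, or waited long enough
          }
          Thread.sleep(500L * attempt); // give the in-flight flush time to drain the memstore
        }
      }
    }
  }
}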
2024-11-27T13:23:40,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:40,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713880361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713880362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713880361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713880367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713880369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:40,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:40,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7ca45dda8c3e4c0db16a56b501f817ca 2024-11-27T13:23:40,555 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:40,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:40,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:40,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:40,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/0545530537ef4c48bcbea29101ba6e04 is 50, key is test_row_0/B:col10/1732713819781/Put/seqid=0 2024-11-27T13:23:40,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741840_1016 (size=12001) 2024-11-27T13:23:40,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/0545530537ef4c48bcbea29101ba6e04 2024-11-27T13:23:40,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/3c3ebce4a2dc4d0394a50ca7a20283b6 is 50, key is test_row_0/C:col10/1732713819781/Put/seqid=0 2024-11-27T13:23:40,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713880668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713880668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713880669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713880676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:40,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713880678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:40,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741841_1017 (size=12001) 2024-11-27T13:23:40,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/3c3ebce4a2dc4d0394a50ca7a20283b6 2024-11-27T13:23:40,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7ca45dda8c3e4c0db16a56b501f817ca as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca 2024-11-27T13:23:40,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:40,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:40,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:40,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca, entries=150, sequenceid=16, filesize=11.7 K 2024-11-27T13:23:40,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/0545530537ef4c48bcbea29101ba6e04 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04 2024-11-27T13:23:40,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04, entries=150, sequenceid=16, filesize=11.7 K 2024-11-27T13:23:40,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/3c3ebce4a2dc4d0394a50ca7a20283b6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6 2024-11-27T13:23:40,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6, entries=150, sequenceid=16, filesize=11.7 K 
2024-11-27T13:23:40,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 2b5b15f41df6d1ae2583263f41ba6257 in 930ms, sequenceid=16, compaction requested=false 2024-11-27T13:23:40,757 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-27T13:23:40,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:40,868 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:40,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-27T13:23:40,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:40,871 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:23:40,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:40,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:40,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b19b533cd48a4ddb9da75b87d0dee48b is 50, key is test_row_0/A:col10/1732713819977/Put/seqid=0 2024-11-27T13:23:40,905 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T13:23:40,906 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-27T13:23:40,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741842_1018 (size=12001) 2024-11-27T13:23:40,922 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b19b533cd48a4ddb9da75b87d0dee48b 2024-11-27T13:23:40,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/b9919bf354f74e918f9167d2056408a7 is 50, key is test_row_0/B:col10/1732713819977/Put/seqid=0 2024-11-27T13:23:40,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741843_1019 (size=12001) 2024-11-27T13:23:40,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/b9919bf354f74e918f9167d2056408a7 2024-11-27T13:23:41,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/21455a60c885466caff1479e6bba3dee is 50, key is test_row_0/C:col10/1732713819977/Put/seqid=0 2024-11-27T13:23:41,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741844_1020 (size=12001) 2024-11-27T13:23:41,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:41,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:41,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713881199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713881196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713881202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713881207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713881208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713881311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713881312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713881314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713881317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713881318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,461 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/21455a60c885466caff1479e6bba3dee 2024-11-27T13:23:41,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b19b533cd48a4ddb9da75b87d0dee48b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b 2024-11-27T13:23:41,504 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:23:41,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/b9919bf354f74e918f9167d2056408a7 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7 2024-11-27T13:23:41,523 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:23:41,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/21455a60c885466caff1479e6bba3dee as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee 2024-11-27T13:23:41,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713881520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713881521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713881522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713881523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713881523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,540 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:23:41,542 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 2b5b15f41df6d1ae2583263f41ba6257 in 670ms, sequenceid=38, compaction requested=false 2024-11-27T13:23:41,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:41,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:41,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-27T13:23:41,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-27T13:23:41,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-27T13:23:41,553 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8330 sec 2024-11-27T13:23:41,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.8530 sec 2024-11-27T13:23:41,673 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:23:41,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-27T13:23:41,830 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-27T13:23:41,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:41,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-27T13:23:41,840 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:41,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:41,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:41,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T13:23:41,841 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:41,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:41,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:41,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:41,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, 
store=B 2024-11-27T13:23:41,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:41,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:41,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:41,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/279f1e117fd94748985a858290c6ee6f is 50, key is test_row_0/A:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:41,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741845_1021 (size=16681) 2024-11-27T13:23:41,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/279f1e117fd94748985a858290c6ee6f 2024-11-27T13:23:41,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/d36cb9932e7245839d8ef3fc8385973d is 50, key is test_row_0/B:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:41,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741846_1022 (size=12001) 2024-11-27T13:23:41,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/d36cb9932e7245839d8ef3fc8385973d 2024-11-27T13:23:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713881894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713881898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/fff1b25b9bfd4749b5325e3e37984cc3 is 50, key is test_row_0/C:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:41,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713881920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713881948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713881956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:41,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741847_1023 (size=12001) 2024-11-27T13:23:41,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/fff1b25b9bfd4749b5325e3e37984cc3 2024-11-27T13:23:41,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:41,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:41,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:41,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:41,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:41,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:41,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/279f1e117fd94748985a858290c6ee6f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f 2024-11-27T13:23:42,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f, entries=250, sequenceid=55, filesize=16.3 K 2024-11-27T13:23:42,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/d36cb9932e7245839d8ef3fc8385973d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d 2024-11-27T13:23:42,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713882023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:23:42,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/fff1b25b9bfd4749b5325e3e37984cc3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3 2024-11-27T13:23:42,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713882034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:23:42,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for 2b5b15f41df6d1ae2583263f41ba6257 in 210ms, sequenceid=55, compaction requested=true 2024-11-27T13:23:42,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:42,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:42,063 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:42,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 
2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:42,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,068 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:42,069 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:42,070 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:42,070 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,072 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.2 K 2024-11-27T13:23:42,075 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:42,075 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:42,075 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0545530537ef4c48bcbea29101ba6e04, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713819781 2024-11-27T13:23:42,075 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in 
TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,076 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=39.7 K 2024-11-27T13:23:42,076 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ca45dda8c3e4c0db16a56b501f817ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713819781 2024-11-27T13:23:42,077 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b9919bf354f74e918f9167d2056408a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713819958 2024-11-27T13:23:42,077 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b19b533cd48a4ddb9da75b87d0dee48b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713819958 2024-11-27T13:23:42,078 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d36cb9932e7245839d8ef3fc8385973d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:42,078 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 279f1e117fd94748985a858290c6ee6f, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:42,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bef83f31cb094b7a92787c0f1ba0f78e is 50, key is test_row_0/A:col10/1732713822061/Put/seqid=0 2024-11-27T13:23:42,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741848_1024 (size=12001) 2024-11-27T13:23:42,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713882123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713882126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713882128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:42,148 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#11 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:42,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:42,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,158 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#10 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:42,159 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/317b1dbae93a424a915fb0518f0ba360 is 50, key is test_row_0/A:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:42,160 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/231cfbcafc8e4ee2822e59b56a03d4ed is 50, key is test_row_0/B:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:42,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741849_1025 (size=12104) 2024-11-27T13:23:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741850_1026 (size=12104) 2024-11-27T13:23:42,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713882232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713882237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713882239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713882240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713882251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:42,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:42,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713882446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713882449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713882452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,460 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-27T13:23:42,460 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-27T13:23:42,462 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-27T13:23:42,462 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-27T13:23:42,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T13:23:42,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-27T13:23:42,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-27T13:23:42,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-27T13:23:42,466 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-27T13:23:42,466 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-27T13:23:42,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bef83f31cb094b7a92787c0f1ba0f78e 2024-11-27T13:23:42,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713882537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/48b60cbc74884b039bc7d576649be4df is 50, key is test_row_0/B:col10/1732713822061/Put/seqid=0 2024-11-27T13:23:42,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713882561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741851_1027 (size=12001) 2024-11-27T13:23:42,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/48b60cbc74884b039bc7d576649be4df 2024-11-27T13:23:42,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/486db83cb0ee40f2a96bf137528ba72c is 50, key is test_row_0/C:col10/1732713822061/Put/seqid=0 2024-11-27T13:23:42,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:42,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,629 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/317b1dbae93a424a915fb0518f0ba360 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/317b1dbae93a424a915fb0518f0ba360 2024-11-27T13:23:42,635 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/231cfbcafc8e4ee2822e59b56a03d4ed as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/231cfbcafc8e4ee2822e59b56a03d4ed 2024-11-27T13:23:42,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741852_1028 (size=12001) 2024-11-27T13:23:42,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/486db83cb0ee40f2a96bf137528ba72c 2024-11-27T13:23:42,653 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 317b1dbae93a424a915fb0518f0ba360(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:42,653 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:42,653 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713822052; duration=0sec 2024-11-27T13:23:42,654 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:42,654 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:42,654 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:42,657 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:42,657 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:42,657 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,658 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.2 K 2024-11-27T13:23:42,658 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c3ebce4a2dc4d0394a50ca7a20283b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713819781 2024-11-27T13:23:42,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21455a60c885466caff1479e6bba3dee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713819958 2024-11-27T13:23:42,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bef83f31cb094b7a92787c0f1ba0f78e as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e 2024-11-27T13:23:42,662 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fff1b25b9bfd4749b5325e3e37984cc3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:42,667 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 231cfbcafc8e4ee2822e59b56a03d4ed(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:42,667 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:42,667 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713822063; duration=0sec 2024-11-27T13:23:42,667 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:42,667 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:42,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e, entries=150, sequenceid=79, filesize=11.7 K 2024-11-27T13:23:42,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/48b60cbc74884b039bc7d576649be4df as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df 2024-11-27T13:23:42,694 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#14 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:42,695 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9f769a3eef834840abcb38a51ddc11bd is 50, key is test_row_0/C:col10/1732713821839/Put/seqid=0 2024-11-27T13:23:42,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df, entries=150, sequenceid=79, filesize=11.7 K 2024-11-27T13:23:42,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/486db83cb0ee40f2a96bf137528ba72c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c 2024-11-27T13:23:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741853_1029 (size=12104) 2024-11-27T13:23:42,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c, entries=150, sequenceid=79, filesize=11.7 K 2024-11-27T13:23:42,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 2b5b15f41df6d1ae2583263f41ba6257 in 653ms, sequenceid=79, compaction requested=false 2024-11-27T13:23:42,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:42,725 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9f769a3eef834840abcb38a51ddc11bd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9f769a3eef834840abcb38a51ddc11bd 2024-11-27T13:23:42,738 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 9f769a3eef834840abcb38a51ddc11bd(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:42,738 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:42,739 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713822063; duration=0sec 2024-11-27T13:23:42,739 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:42,739 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:42,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:23:42,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:42,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:42,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:42,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:42,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:42,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/15178bbebec34fa198dd3f79177dd2e5 is 50, key is test_row_0/A:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:42,780 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
as already flushing 2024-11-27T13:23:42,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741854_1030 (size=12001) 2024-11-27T13:23:42,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/15178bbebec34fa198dd3f79177dd2e5 2024-11-27T13:23:42,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713882826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c2cb1252e3704cc6954b617dda18be2a is 50, key is test_row_0/B:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713882832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713882832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741855_1031 (size=12001) 2024-11-27T13:23:42,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c2cb1252e3704cc6954b617dda18be2a 2024-11-27T13:23:42,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8ad5d83226c7484f8baa3e9e229fb2dc is 50, key is test_row_0/C:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:42,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741856_1032 (size=12001) 2024-11-27T13:23:42,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8ad5d83226c7484f8baa3e9e229fb2dc 2024-11-27T13:23:42,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:42,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:42,937 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:42,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:42,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713882937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:42,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713882942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713882942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:42,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/15178bbebec34fa198dd3f79177dd2e5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5 2024-11-27T13:23:42,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5, entries=150, sequenceid=97, filesize=11.7 K 2024-11-27T13:23:42,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c2cb1252e3704cc6954b617dda18be2a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a 2024-11-27T13:23:42,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a, entries=150, sequenceid=97, filesize=11.7 K 2024-11-27T13:23:42,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8ad5d83226c7484f8baa3e9e229fb2dc as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc 2024-11-27T13:23:42,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc, entries=150, sequenceid=97, filesize=11.7 K 
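The RegionTooBusyException warnings above are the region refusing new Mutate calls while its memstore sits over the blocking limit (the "Over memstore limit=512.0 K" figure is the region's blocking memstore size, i.e. the flush size times hbase.hregion.memstore.block.multiplier, which this test appears to set far below the production default). A minimal client-side sketch of how a writer could back off and retry such rejections is shown below; it is an illustration only, not part of the test source. The table name and column family come from this log, while the row value, retry count, and backoff policy are assumptions.

// Sketch: back off and retry a put that is rejected while the memstore is over
// its blocking limit. Not taken from TestAcidGuarantees; names from this log.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // may be rejected while the region is "too busy"
          return;                  // write accepted
        } catch (IOException e) {  // RegionTooBusyException is an IOException subclass
          Thread.sleep(backoffMs); // give the flush time to drain the memstore
          backoffMs *= 2;          // exponential backoff before retrying
        }
      }
      throw new IOException("write still rejected after retries");
    }
  }
}

The stock HBase client normally retries RegionTooBusyException on its own (as the repeated callId retries above suggest); the explicit loop here only makes the backoff visible.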
2024-11-27T13:23:43,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 2b5b15f41df6d1ae2583263f41ba6257 in 245ms, sequenceid=97, compaction requested=true 2024-11-27T13:23:43,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:43,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:43,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:43,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:43,004 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:43,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:43,007 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:43,007 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:43,007 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:43,007 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:43,007 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,007 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
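At this point the flush has finished and the flusher has queued minor compactions for stores A, B and C, which the ExploringCompactionPolicy then selects (3 eligible files per store). The same flush and compaction requests can also be issued administratively; the sketch below shows the corresponding Admin calls. It is an illustration under the assumption of a standard client Connection, not something the test itself does.

// Sketch: request a table flush and store compactions via the Admin API,
// mirroring the MemStoreFlusher/CompactSplit activity logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompact {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);                        // flush all memstores of the table
      admin.compact(table, Bytes.toBytes("A"));  // ask for a compaction of store A only
      admin.majorCompact(table);                 // or rewrite every store file in one pass
    }
  }
}

Both calls are asynchronous requests: they enqueue work on the region server, just as the flush procedure (pid=15) and the short/long compaction threads do in the log that follows.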
2024-11-27T13:23:43,008 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/317b1dbae93a424a915fb0518f0ba360, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.3 K 2024-11-27T13:23:43,008 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/231cfbcafc8e4ee2822e59b56a03d4ed, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.3 K 2024-11-27T13:23:43,009 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 231cfbcafc8e4ee2822e59b56a03d4ed, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:43,009 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 317b1dbae93a424a915fb0518f0ba360, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:43,010 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bef83f31cb094b7a92787c0f1ba0f78e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732713822050 2024-11-27T13:23:43,010 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 48b60cbc74884b039bc7d576649be4df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732713822050 2024-11-27T13:23:43,010 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15178bbebec34fa198dd3f79177dd2e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:43,013 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c2cb1252e3704cc6954b617dda18be2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:43,044 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:43,045 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/a4224da74cab4325b28e7e0e473cb91c is 50, key is test_row_0/A:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:43,052 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#19 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:43,053 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/b998eb9fae874cd6b231028f81bdd6a4 is 50, key is test_row_0/B:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:43,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:23:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:43,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741858_1034 (size=12207) 2024-11-27T13:23:43,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:43,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:43,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
as already flushing 2024-11-27T13:23:43,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741857_1033 (size=12207) 2024-11-27T13:23:43,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713883089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cc3af2d0ca67416cbdbb2bdb7805d17b is 50, key is test_row_0/A:col10/1732713823050/Put/seqid=0 2024-11-27T13:23:43,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713883099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,125 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/b998eb9fae874cd6b231028f81bdd6a4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b998eb9fae874cd6b231028f81bdd6a4 2024-11-27T13:23:43,127 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/a4224da74cab4325b28e7e0e473cb91c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/a4224da74cab4325b28e7e0e473cb91c 2024-11-27T13:23:43,143 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into a4224da74cab4325b28e7e0e473cb91c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:43,143 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:43,144 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713823004; duration=0sec 2024-11-27T13:23:43,144 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:43,144 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:43,144 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into b998eb9fae874cd6b231028f81bdd6a4(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:43,144 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:43,144 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:43,144 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713823004; duration=0sec 2024-11-27T13:23:43,145 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:43,145 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:43,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713883141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,148 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:43,149 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:43,149 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741859_1035 (size=12001) 2024-11-27T13:23:43,149 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9f769a3eef834840abcb38a51ddc11bd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.3 K 2024-11-27T13:23:43,150 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f769a3eef834840abcb38a51ddc11bd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713821196 2024-11-27T13:23:43,151 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 486db83cb0ee40f2a96bf137528ba72c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732713822050 2024-11-27T13:23:43,151 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ad5d83226c7484f8baa3e9e229fb2dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:43,155 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cc3af2d0ca67416cbdbb2bdb7805d17b 2024-11-27T13:23:43,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713883152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713883153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/94f73969940f42e7bee05882e15c62b5 is 50, key is test_row_0/B:col10/1732713823050/Put/seqid=0 2024-11-27T13:23:43,190 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#22 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:43,191 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/b23110d16a004fb192eb060a794967ff is 50, key is test_row_0/C:col10/1732713822756/Put/seqid=0 2024-11-27T13:23:43,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713883201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713883212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741860_1036 (size=12001) 2024-11-27T13:23:43,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/94f73969940f42e7bee05882e15c62b5 2024-11-27T13:23:43,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741861_1037 (size=12207) 2024-11-27T13:23:43,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:43,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:43,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:43,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:43,256 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/b23110d16a004fb192eb060a794967ff as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b23110d16a004fb192eb060a794967ff 2024-11-27T13:23:43,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/94b80acda74a4a57ad6e07ee232387f6 is 50, key is test_row_0/C:col10/1732713823050/Put/seqid=0 2024-11-27T13:23:43,273 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into b23110d16a004fb192eb060a794967ff(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
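The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit; callers are expected to back off until the in-progress flush completes. A minimal client-side sketch of that back-off follows (illustrative only: the table name, column family, and row key are taken from the log, the value and retry parameters are assumptions, and the stock HBase client may retry this exception internally before it ever reaches application code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row key, family "A" and qualifier "col10" mirror the keys seen in the log; the value is made up.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100L; // illustrative starting back-off
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put); // may surface RegionTooBusyException while the memstore is over its blocking limit
                    return;         // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is still flushing; wait and retry with exponential back-off.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}

On each RegionTooBusyException the loop sleeps and doubles the wait, which is the same pattern the repeated callIds above reflect: the writers keep retrying while MemStoreFlusher.0 drains the memstore.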
2024-11-27T13:23:43,273 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:43,273 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713823005; duration=0sec 2024-11-27T13:23:43,273 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:43,273 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:43,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741862_1038 (size=12001) 2024-11-27T13:23:43,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/94b80acda74a4a57ad6e07ee232387f6 2024-11-27T13:23:43,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cc3af2d0ca67416cbdbb2bdb7805d17b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b 2024-11-27T13:23:43,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b, entries=150, sequenceid=120, filesize=11.7 K 2024-11-27T13:23:43,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/94f73969940f42e7bee05882e15c62b5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5 2024-11-27T13:23:43,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5, entries=150, sequenceid=120, filesize=11.7 K 2024-11-27T13:23:43,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/94b80acda74a4a57ad6e07ee232387f6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6 2024-11-27T13:23:43,356 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6, entries=150, sequenceid=120, filesize=11.7 K 2024-11-27T13:23:43,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 2b5b15f41df6d1ae2583263f41ba6257 in 304ms, sequenceid=120, compaction requested=false 2024-11-27T13:23:43,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:43,405 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:43,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-27T13:23:43,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:43,408 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:23:43,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:43,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:43,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:43,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
as already flushing 2024-11-27T13:23:43,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e74a2d900d5e42878300cae752dffbe6 is 50, key is test_row_0/A:col10/1732713823091/Put/seqid=0 2024-11-27T13:23:43,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741863_1039 (size=12151) 2024-11-27T13:23:43,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e74a2d900d5e42878300cae752dffbe6 2024-11-27T13:23:43,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/24ec0d1e605c4900b4b4a279cfa057e6 is 50, key is test_row_0/B:col10/1732713823091/Put/seqid=0 2024-11-27T13:23:43,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713883477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713883479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713883482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713883484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713883484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741864_1040 (size=12151) 2024-11-27T13:23:43,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713883586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713883587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713883590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713883590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713883591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713883792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713883793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713883793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713883795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713883794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:43,913 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/24ec0d1e605c4900b4b4a279cfa057e6 2024-11-27T13:23:43,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/bafbf99522e2480eac18756d0d6d8cf3 is 50, key is test_row_0/C:col10/1732713823091/Put/seqid=0 2024-11-27T13:23:43,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:43,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741865_1041 (size=12151) 2024-11-27T13:23:44,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713884098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713884102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713884104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713884105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713884106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,376 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/bafbf99522e2480eac18756d0d6d8cf3 2024-11-27T13:23:44,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e74a2d900d5e42878300cae752dffbe6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6 2024-11-27T13:23:44,400 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6, entries=150, sequenceid=136, filesize=11.9 K 2024-11-27T13:23:44,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/24ec0d1e605c4900b4b4a279cfa057e6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6 2024-11-27T13:23:44,422 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6, entries=150, sequenceid=136, filesize=11.9 K 2024-11-27T13:23:44,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/bafbf99522e2480eac18756d0d6d8cf3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3 2024-11-27T13:23:44,437 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3, entries=150, sequenceid=136, filesize=11.9 K 2024-11-27T13:23:44,441 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 2b5b15f41df6d1ae2583263f41ba6257 in 1033ms, sequenceid=136, compaction requested=true 2024-11-27T13:23:44,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:44,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:44,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-27T13:23:44,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-27T13:23:44,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-27T13:23:44,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6030 sec 2024-11-27T13:23:44,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.6130 sec 2024-11-27T13:23:44,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:23:44,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:44,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:44,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-27T13:23:44,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:44,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/06e6f2716c25495d93670217fa7db097 is 50, key is test_row_0/A:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713884619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713884621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713884623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713884626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713884628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741866_1042 (size=14541) 2024-11-27T13:23:44,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713884730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713884730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713884732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713884733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713884733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713884936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713884937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713884937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713884938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:44,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:44,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713884939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/06e6f2716c25495d93670217fa7db097 2024-11-27T13:23:45,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/64d156a6b9364479a17a5cb4cb5012ce is 50, key is test_row_0/B:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:45,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741867_1043 (size=12151) 2024-11-27T13:23:45,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/64d156a6b9364479a17a5cb4cb5012ce 2024-11-27T13:23:45,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff9a99fa0af74cf4a1694f3fa37184c6 is 50, key is test_row_0/C:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:45,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741868_1044 (size=12151) 2024-11-27T13:23:45,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713885241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713885241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713885242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713885241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713885242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff9a99fa0af74cf4a1694f3fa37184c6 2024-11-27T13:23:45,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/06e6f2716c25495d93670217fa7db097 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097 2024-11-27T13:23:45,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097, entries=200, sequenceid=161, filesize=14.2 K 2024-11-27T13:23:45,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/64d156a6b9364479a17a5cb4cb5012ce as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce 2024-11-27T13:23:45,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce, entries=150, sequenceid=161, filesize=11.9 K 2024-11-27T13:23:45,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff9a99fa0af74cf4a1694f3fa37184c6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6 2024-11-27T13:23:45,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6, entries=150, sequenceid=161, filesize=11.9 K 2024-11-27T13:23:45,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2b5b15f41df6d1ae2583263f41ba6257 in 990ms, sequenceid=161, compaction requested=true 2024-11-27T13:23:45,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:45,599 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:45,599 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:45,601 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50900 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:45,602 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor 
compaction (all files) 2024-11-27T13:23:45,602 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:45,602 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/a4224da74cab4325b28e7e0e473cb91c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=49.7 K 2024-11-27T13:23:45,602 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:45,602 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:45,602 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:45,603 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b998eb9fae874cd6b231028f81bdd6a4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=47.4 K 2024-11-27T13:23:45,603 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b998eb9fae874cd6b231028f81bdd6a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:45,604 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4224da74cab4325b28e7e0e473cb91c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:45,604 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 94f73969940f42e7bee05882e15c62b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713822807 2024-11-27T13:23:45,604 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc3af2d0ca67416cbdbb2bdb7805d17b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713822807 2024-11-27T13:23:45,605 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 24ec0d1e605c4900b4b4a279cfa057e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732713823077 2024-11-27T13:23:45,605 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 64d156a6b9364479a17a5cb4cb5012ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823480 2024-11-27T13:23:45,606 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e74a2d900d5e42878300cae752dffbe6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1732713823077 2024-11-27T13:23:45,607 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06e6f2716c25495d93670217fa7db097, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823472 2024-11-27T13:23:45,640 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#30 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:45,641 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/8b7f358acc2243b49101ba8ac97b424a is 50, key is test_row_0/B:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:45,648 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:45,649 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0931184f20134207aea79d2662c55d93 is 50, key is test_row_0/A:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:45,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741869_1045 (size=12493) 2024-11-27T13:23:45,686 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/8b7f358acc2243b49101ba8ac97b424a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/8b7f358acc2243b49101ba8ac97b424a 2024-11-27T13:23:45,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741870_1046 (size=12493) 2024-11-27T13:23:45,700 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 8b7f358acc2243b49101ba8ac97b424a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:45,700 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:45,701 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=12, startTime=1732713825599; duration=0sec 2024-11-27T13:23:45,701 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:45,701 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:45,701 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:45,705 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:45,705 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:45,705 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:45,706 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b23110d16a004fb192eb060a794967ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=47.4 K 2024-11-27T13:23:45,706 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b23110d16a004fb192eb060a794967ff, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732713822119 2024-11-27T13:23:45,708 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 94b80acda74a4a57ad6e07ee232387f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713822807 2024-11-27T13:23:45,708 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bafbf99522e2480eac18756d0d6d8cf3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=136, earliestPutTs=1732713823077 2024-11-27T13:23:45,710 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ff9a99fa0af74cf4a1694f3fa37184c6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823480 2024-11-27T13:23:45,711 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0931184f20134207aea79d2662c55d93 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0931184f20134207aea79d2662c55d93 2024-11-27T13:23:45,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 0931184f20134207aea79d2662c55d93(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:45,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:45,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=12, startTime=1732713825598; duration=0sec 2024-11-27T13:23:45,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:45,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:45,739 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#32 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:45,740 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/fd92c336fe4d413fba1d6655e0d1570c is 50, key is test_row_0/C:col10/1732713824606/Put/seqid=0 2024-11-27T13:23:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:45,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:23:45,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:45,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:45,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:45,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:45,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:45,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:45,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741871_1047 (size=12493) 2024-11-27T13:23:45,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/472e23c51a4a4db39e0b4c425eecbba2 is 50, key is test_row_0/A:col10/1732713825751/Put/seqid=0 2024-11-27T13:23:45,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741872_1048 (size=12151) 2024-11-27T13:23:45,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/472e23c51a4a4db39e0b4c425eecbba2 2024-11-27T13:23:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713885792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713885797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713885797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713885799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713885802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a6ae6861db204beea807eadaab25c5bb is 50, key is test_row_0/B:col10/1732713825751/Put/seqid=0 2024-11-27T13:23:45,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741873_1049 (size=12151) 2024-11-27T13:23:45,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713885904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713885907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713885907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713885909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:45,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713885909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:45,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-27T13:23:45,950 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-27T13:23:45,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:45,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-27T13:23:45,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T13:23:45,955 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:45,956 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:45,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:46,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=16 2024-11-27T13:23:46,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:46,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-27T13:23:46,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:46,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:46,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713886110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713886112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713886113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713886113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713886114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,178 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/fd92c336fe4d413fba1d6655e0d1570c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fd92c336fe4d413fba1d6655e0d1570c 2024-11-27T13:23:46,189 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into fd92c336fe4d413fba1d6655e0d1570c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:46,190 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,190 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=12, startTime=1732713825599; duration=0sec 2024-11-27T13:23:46,190 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:46,190 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:46,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a6ae6861db204beea807eadaab25c5bb 2024-11-27T13:23:46,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/03cb92e7a94d4eaaa508507b7a323b8f is 50, key is test_row_0/C:col10/1732713825751/Put/seqid=0 2024-11-27T13:23:46,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T13:23:46,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741874_1050 (size=12151) 2024-11-27T13:23:46,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/03cb92e7a94d4eaaa508507b7a323b8f 2024-11-27T13:23:46,267 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:46,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 
2024-11-27T13:23:46,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:46,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,268 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:46,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/472e23c51a4a4db39e0b4c425eecbba2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2 2024-11-27T13:23:46,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2, entries=150, sequenceid=177, filesize=11.9 K 2024-11-27T13:23:46,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a6ae6861db204beea807eadaab25c5bb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb 2024-11-27T13:23:46,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb, entries=150, sequenceid=177, filesize=11.9 K 2024-11-27T13:23:46,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/03cb92e7a94d4eaaa508507b7a323b8f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f 2024-11-27T13:23:46,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f, entries=150, sequenceid=177, filesize=11.9 K 2024-11-27T13:23:46,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2b5b15f41df6d1ae2583263f41ba6257 in 560ms, sequenceid=177, compaction requested=false 2024-11-27T13:23:46,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:46,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:23:46,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:46,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,420 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:46,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:46,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:46,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-27T13:23:46,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:46,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:46,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b is 50, key is test_row_0/A:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713886441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741875_1051 (size=16931) 2024-11-27T13:23:46,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713886445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713886445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713886446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713886448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b 2024-11-27T13:23:46,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e3307c280da548059beed50a59698182 is 50, key is test_row_0/B:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741876_1052 (size=12151) 2024-11-27T13:23:46,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e3307c280da548059beed50a59698182 2024-11-27T13:23:46,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/48045c300a72423cae35611940a3a939 is 50, key is test_row_0/C:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741877_1053 (size=12151) 2024-11-27T13:23:46,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/48045c300a72423cae35611940a3a939 2024-11-27T13:23:46,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713886548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713886552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713886553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713886554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713886555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T13:23:46,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b 2024-11-27T13:23:46,576 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:46,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-27T13:23:46,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:46,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:46,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:46,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:46,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b, entries=250, sequenceid=202, filesize=16.5 K 2024-11-27T13:23:46,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e3307c280da548059beed50a59698182 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182 2024-11-27T13:23:46,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182, entries=150, sequenceid=202, filesize=11.9 K 2024-11-27T13:23:46,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/48045c300a72423cae35611940a3a939 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939 2024-11-27T13:23:46,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939, entries=150, sequenceid=202, filesize=11.9 K 2024-11-27T13:23:46,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 2b5b15f41df6d1ae2583263f41ba6257 in 195ms, sequenceid=202, compaction requested=true 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:46,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:46,614 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:46,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:46,616 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:46,616 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:46,616 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:46,616 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0931184f20134207aea79d2662c55d93, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=40.6 K 2024-11-27T13:23:46,617 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0931184f20134207aea79d2662c55d93, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823480 2024-11-27T13:23:46,617 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:46,617 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:46,618 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:46,618 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/8b7f358acc2243b49101ba8ac97b424a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.9 K 2024-11-27T13:23:46,618 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 472e23c51a4a4db39e0b4c425eecbba2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732713824619 2024-11-27T13:23:46,618 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b7f358acc2243b49101ba8ac97b424a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823480 2024-11-27T13:23:46,619 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f3f5dd0e1ef4116ad9ecc8a5ae84c1b, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:46,619 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6ae6861db204beea807eadaab25c5bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732713824619 2024-11-27T13:23:46,620 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3307c280da548059beed50a59698182, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:46,642 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:46,643 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/6fe3024f53f24576a692e0a99dcf1d68 is 50, key is test_row_0/A:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,649 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:46,650 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/95df8d94d3404a9d85adf329824d8413 is 50, key is test_row_0/B:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741878_1054 (size=12595) 2024-11-27T13:23:46,677 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/6fe3024f53f24576a692e0a99dcf1d68 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/6fe3024f53f24576a692e0a99dcf1d68 2024-11-27T13:23:46,688 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 6fe3024f53f24576a692e0a99dcf1d68(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:46,688 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,688 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713826614; duration=0sec 2024-11-27T13:23:46,688 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:46,688 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:46,688 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:46,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741879_1055 (size=12595) 2024-11-27T13:23:46,693 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:46,693 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:46,693 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:46,693 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fd92c336fe4d413fba1d6655e0d1570c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=35.9 K 2024-11-27T13:23:46,694 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fd92c336fe4d413fba1d6655e0d1570c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732713823480 2024-11-27T13:23:46,695 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 03cb92e7a94d4eaaa508507b7a323b8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1732713824619 2024-11-27T13:23:46,696 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 48045c300a72423cae35611940a3a939, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:46,701 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/95df8d94d3404a9d85adf329824d8413 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/95df8d94d3404a9d85adf329824d8413 2024-11-27T13:23:46,712 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 95df8d94d3404a9d85adf329824d8413(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:46,712 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,712 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713826614; duration=0sec 2024-11-27T13:23:46,712 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:46,712 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:46,715 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#41 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:46,716 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a3cde8669d4a40b0bde5279fdf13a762 is 50, key is test_row_0/C:col10/1732713826418/Put/seqid=0 2024-11-27T13:23:46,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:46,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-27T13:23:46,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:46,731 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:46,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:46,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:46,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741880_1056 (size=12595) 2024-11-27T13:23:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 is 50, key is test_row_0/A:col10/1732713826443/Put/seqid=0 2024-11-27T13:23:46,771 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a3cde8669d4a40b0bde5279fdf13a762 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a3cde8669d4a40b0bde5279fdf13a762 2024-11-27T13:23:46,780 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into a3cde8669d4a40b0bde5279fdf13a762(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:46,780 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:46,780 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713826614; duration=0sec 2024-11-27T13:23:46,780 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:46,780 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:46,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713886797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713886798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713886799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713886804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713886804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741881_1057 (size=12151) 2024-11-27T13:23:46,815 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 2024-11-27T13:23:46,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/3b8ed677b79d4b978eff5d7587cd2d78 is 50, key is test_row_0/B:col10/1732713826443/Put/seqid=0 2024-11-27T13:23:46,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741882_1058 (size=12151) 2024-11-27T13:23:46,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713886907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713886906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713886907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713886910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:46,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713886911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T13:23:47,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713887112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713887114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713887115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713887116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713887116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,253 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/3b8ed677b79d4b978eff5d7587cd2d78 2024-11-27T13:23:47,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/d3efab72dedf4c04b0e68615b2e130b5 is 50, key is test_row_0/C:col10/1732713826443/Put/seqid=0 2024-11-27T13:23:47,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741883_1059 (size=12151) 2024-11-27T13:23:47,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713887418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713887419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713887420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713887420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713887429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,681 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/d3efab72dedf4c04b0e68615b2e130b5 2024-11-27T13:23:47,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 2024-11-27T13:23:47,700 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1, entries=150, sequenceid=217, filesize=11.9 K 2024-11-27T13:23:47,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/3b8ed677b79d4b978eff5d7587cd2d78 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78 2024-11-27T13:23:47,709 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78, entries=150, sequenceid=217, filesize=11.9 K 2024-11-27T13:23:47,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/d3efab72dedf4c04b0e68615b2e130b5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5 2024-11-27T13:23:47,725 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5, entries=150, sequenceid=217, filesize=11.9 K 2024-11-27T13:23:47,727 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 2b5b15f41df6d1ae2583263f41ba6257 in 996ms, sequenceid=217, compaction requested=false 2024-11-27T13:23:47,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:47,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
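The repeated RegionTooBusyException warnings throughout this stretch come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit (reported as 512.0 K here). As a rough client-side illustration only, the sketch below uses the same row/family/qualifier shape as the cells in the log; the 128 KB flush size and multiplier of 4 are assumed example values that happen to multiply out to the 512 KB limit above, not values read from this test's configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Regionserver-side settings, shown only to make the arithmetic explicit:
        // blocking limit = flush size x block multiplier, e.g. 128 KB x 4 = 512 KB.
        // These exact values are an assumption, not taken from the test run above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same cell shape as in the log ("test_row_0/A:col10").
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try {
                table.put(put); // the client retries RegionTooBusyException internally with backoff
            } catch (IOException e) {
                // Only reached once client retries are exhausted; the server-side
                // RegionTooBusyException seen in the log is usually in the cause chain.
                System.err.println("put failed after retries: " + e);
            }
        }
    }
}

Because the client retries these rejections on its own with backoff, the same connections (172.17.0.2:44600, :44602, :44620, :44622, :44650) reappear in the entries below with higher callIds a few hundred milliseconds apart until the flush relieves the memstore pressure.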
2024-11-27T13:23:47,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-27T13:23:47,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-27T13:23:47,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-27T13:23:47,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-11-27T13:23:47,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.7810 sec 2024-11-27T13:23:47,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:47,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:47,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:47,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/533588c3ceca4431868c9b856dcc216a is 50, key is test_row_0/A:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:47,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713887949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713887949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713887951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713887952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713887952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741884_1060 (size=12151) 2024-11-27T13:23:47,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/533588c3ceca4431868c9b856dcc216a 2024-11-27T13:23:47,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6b350bcca7af486cae4e4596498a4e0d is 50, key is test_row_0/B:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:48,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741885_1061 (size=12151) 2024-11-27T13:23:48,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6b350bcca7af486cae4e4596498a4e0d 2024-11-27T13:23:48,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/4aabe551828c4d9eb2658521538c2b82 is 50, key is test_row_0/C:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:48,058 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713888057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713888059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713888059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713888060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741886_1062 (size=12151) 2024-11-27T13:23:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-27T13:23:48,064 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-27T13:23:48,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/4aabe551828c4d9eb2658521538c2b82 2024-11-27T13:23:48,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713888060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,066 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:48,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-27T13:23:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T13:23:48,069 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:48,072 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:48,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:48,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/533588c3ceca4431868c9b856dcc216a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a 2024-11-27T13:23:48,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a, entries=150, sequenceid=242, filesize=11.9 K 2024-11-27T13:23:48,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6b350bcca7af486cae4e4596498a4e0d as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d 2024-11-27T13:23:48,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d, entries=150, sequenceid=242, filesize=11.9 K 2024-11-27T13:23:48,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/4aabe551828c4d9eb2658521538c2b82 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82 2024-11-27T13:23:48,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82, entries=150, sequenceid=242, filesize=11.9 K 2024-11-27T13:23:48,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 2b5b15f41df6d1ae2583263f41ba6257 in 187ms, sequenceid=242, compaction requested=true 2024-11-27T13:23:48,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:48,114 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:48,114 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:48,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:48,115 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:48,115 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:48,116 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:48,116 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/6fe3024f53f24576a692e0a99dcf1d68, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.0 K 2024-11-27T13:23:48,116 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:48,116 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:48,116 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:48,117 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/95df8d94d3404a9d85adf329824d8413, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.0 K 2024-11-27T13:23:48,118 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fe3024f53f24576a692e0a99dcf1d68, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:48,118 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 95df8d94d3404a9d85adf329824d8413, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:48,118 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8c14504f93d4de9ae1c2e6a33a2d0b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732713826435 2024-11-27T13:23:48,119 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8ed677b79d4b978eff5d7587cd2d78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732713826435 2024-11-27T13:23:48,119 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 533588c3ceca4431868c9b856dcc216a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:48,120 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b350bcca7af486cae4e4596498a4e0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:48,154 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:48,156 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:48,157 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/2124c083881c4f9da0520ce46e99e173 is 50, key is test_row_0/B:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:48,159 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/46f7f8981d174e4c8e8e2f79319d4a9f is 50, key is test_row_0/A:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:48,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T13:23:48,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741888_1064 (size=12697) 2024-11-27T13:23:48,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741887_1063 (size=12697) 2024-11-27T13:23:48,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:48,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-27T13:23:48,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:48,232 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d9c730ff2a9d4fd68abafc8b4051ae96 is 50, key is test_row_0/A:col10/1732713827949/Put/seqid=0 2024-11-27T13:23:48,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
as already flushing 2024-11-27T13:23:48,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:48,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741889_1065 (size=12151) 2024-11-27T13:23:48,272 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d9c730ff2a9d4fd68abafc8b4051ae96 2024-11-27T13:23:48,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a2e34151755b48628e1e6f316b090666 is 50, key is test_row_0/B:col10/1732713827949/Put/seqid=0 2024-11-27T13:23:48,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741890_1066 (size=12151) 2024-11-27T13:23:48,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713888302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713888303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713888304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713888306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713888309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T13:23:48,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713888411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713888411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713888412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713888413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713888413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713888615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713888616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713888618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713888620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713888620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,625 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/2124c083881c4f9da0520ce46e99e173 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/2124c083881c4f9da0520ce46e99e173 2024-11-27T13:23:48,628 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/46f7f8981d174e4c8e8e2f79319d4a9f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/46f7f8981d174e4c8e8e2f79319d4a9f 2024-11-27T13:23:48,638 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 46f7f8981d174e4c8e8e2f79319d4a9f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:48,638 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:48,638 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 2124c083881c4f9da0520ce46e99e173(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:48,638 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713828113; duration=0sec 2024-11-27T13:23:48,638 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:48,638 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713828114; duration=0sec 2024-11-27T13:23:48,639 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:48,639 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:48,639 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:48,639 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:48,639 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:48,641 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:48,641 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:48,641 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:48,641 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a3cde8669d4a40b0bde5279fdf13a762, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.0 K 2024-11-27T13:23:48,642 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3cde8669d4a40b0bde5279fdf13a762, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732713825785 2024-11-27T13:23:48,643 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3efab72dedf4c04b0e68615b2e130b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732713826435 2024-11-27T13:23:48,643 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4aabe551828c4d9eb2658521538c2b82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:48,666 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:48,667 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/122b33ddf7b54d9eba39992487aaabc2 is 50, key is test_row_0/C:col10/1732713827924/Put/seqid=0 2024-11-27T13:23:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T13:23:48,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741891_1067 (size=12697) 2024-11-27T13:23:48,694 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a2e34151755b48628e1e6f316b090666 2024-11-27T13:23:48,707 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/122b33ddf7b54d9eba39992487aaabc2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/122b33ddf7b54d9eba39992487aaabc2 2024-11-27T13:23:48,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9db48292456645919b6439df6a78e9cf is 50, key is test_row_0/C:col10/1732713827949/Put/seqid=0 2024-11-27T13:23:48,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 122b33ddf7b54d9eba39992487aaabc2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:48,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:48,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713828114; duration=0sec 2024-11-27T13:23:48,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:48,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:48,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741892_1068 (size=12151) 2024-11-27T13:23:48,751 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9db48292456645919b6439df6a78e9cf 2024-11-27T13:23:48,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d9c730ff2a9d4fd68abafc8b4051ae96 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96 2024-11-27T13:23:48,769 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96, entries=150, sequenceid=256, filesize=11.9 K 2024-11-27T13:23:48,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a2e34151755b48628e1e6f316b090666 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666 2024-11-27T13:23:48,778 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666, entries=150, sequenceid=256, filesize=11.9 K 2024-11-27T13:23:48,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9db48292456645919b6439df6a78e9cf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf 2024-11-27T13:23:48,788 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf, entries=150, sequenceid=256, filesize=11.9 K 2024-11-27T13:23:48,789 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2b5b15f41df6d1ae2583263f41ba6257 in 558ms, sequenceid=256, compaction requested=false 2024-11-27T13:23:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-27T13:23:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-27T13:23:48,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-27T13:23:48,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 720 msec 2024-11-27T13:23:48,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 732 msec 2024-11-27T13:23:48,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:23:48,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:48,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713888941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713888941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713888942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713888943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0213d036c9d64522a11802937ad70c93 is 50, key is test_row_0/A:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:48,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:48,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713888943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741893_1069 (size=12301) 2024-11-27T13:23:48,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0213d036c9d64522a11802937ad70c93 2024-11-27T13:23:48,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c6929faaab28489ab48a72f45fd8209f is 50, key is test_row_0/B:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:48,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741894_1070 (size=12301) 2024-11-27T13:23:49,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713889045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713889045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713889046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713889049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713889052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-27T13:23:49,174 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-27T13:23:49,177 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:49,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-27T13:23:49,179 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:49,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T13:23:49,180 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:49,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:49,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713889251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713889251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713889253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713889253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713889264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T13:23:49,333 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:49,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T13:23:49,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:49,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:49,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c6929faaab28489ab48a72f45fd8209f 2024-11-27T13:23:49,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/936b93fcd50c40eaa8adf37c8781dfa0 is 50, key is test_row_0/C:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:49,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741895_1071 (size=12301) 2024-11-27T13:23:49,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T13:23:49,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:49,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T13:23:49,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:49,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713889555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713889557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713889557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713889561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713889569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:49,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:49,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T13:23:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T13:23:49,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:49,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T13:23:49,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:49,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:49,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:49,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/936b93fcd50c40eaa8adf37c8781dfa0 2024-11-27T13:23:49,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0213d036c9d64522a11802937ad70c93 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93 2024-11-27T13:23:49,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93, entries=150, sequenceid=284, filesize=12.0 K 2024-11-27T13:23:49,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/c6929faaab28489ab48a72f45fd8209f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f 2024-11-27T13:23:49,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f, entries=150, sequenceid=284, filesize=12.0 K 2024-11-27T13:23:49,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/936b93fcd50c40eaa8adf37c8781dfa0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0 2024-11-27T13:23:49,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0, entries=150, sequenceid=284, filesize=12.0 K 2024-11-27T13:23:49,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 2b5b15f41df6d1ae2583263f41ba6257 in 932ms, sequenceid=284, compaction requested=true 2024-11-27T13:23:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:49,857 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:49,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:49,857 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:49,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:49,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:49,859 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:49,859 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:49,859 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:49,859 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/46f7f8981d174e4c8e8e2f79319d4a9f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.3 K 2024-11-27T13:23:49,859 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:49,859 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46f7f8981d174e4c8e8e2f79319d4a9f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:49,859 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:49,860 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:49,860 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/2124c083881c4f9da0520ce46e99e173, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.3 K 2024-11-27T13:23:49,860 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9c730ff2a9d4fd68abafc8b4051ae96, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732713827938 2024-11-27T13:23:49,861 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2124c083881c4f9da0520ce46e99e173, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:49,861 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0213d036c9d64522a11802937ad70c93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:49,861 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a2e34151755b48628e1e6f316b090666, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732713827938 2024-11-27T13:23:49,862 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c6929faaab28489ab48a72f45fd8209f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:49,877 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#57 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:49,878 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/50588092548741718b0144a36a7c3f51 is 50, key is test_row_0/A:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:49,894 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#58 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:49,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/5b386dc35ea343a5936ec02c043b847b is 50, key is test_row_0/B:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:49,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741896_1072 (size=12949) 2024-11-27T13:23:49,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741897_1073 (size=12949) 2024-11-27T13:23:49,946 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/5b386dc35ea343a5936ec02c043b847b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5b386dc35ea343a5936ec02c043b847b 2024-11-27T13:23:49,955 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 5b386dc35ea343a5936ec02c043b847b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:49,955 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:49,955 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713829857; duration=0sec 2024-11-27T13:23:49,955 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:49,955 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:49,956 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:49,958 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:49,958 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:49,959 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:49,959 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/122b33ddf7b54d9eba39992487aaabc2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.3 K 2024-11-27T13:23:49,959 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 122b33ddf7b54d9eba39992487aaabc2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732713826797 2024-11-27T13:23:49,960 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9db48292456645919b6439df6a78e9cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732713827938 2024-11-27T13:23:49,961 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 936b93fcd50c40eaa8adf37c8781dfa0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:49,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:49,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-27T13:23:49,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:49,967 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:23:49,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:49,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:49,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:49,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:49,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:49,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:49,974 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#59 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:49,975 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/0429ffb9b2124be289eb2810761f6bf8 is 50, key is test_row_0/C:col10/1732713828922/Put/seqid=0 2024-11-27T13:23:49,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d8df0d7921f44646baa613cd48cf11bd is 50, key is test_row_0/A:col10/1732713828941/Put/seqid=0 2024-11-27T13:23:50,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741898_1074 (size=12949) 2024-11-27T13:23:50,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741899_1075 (size=9857) 2024-11-27T13:23:50,010 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d8df0d7921f44646baa613cd48cf11bd 2024-11-27T13:23:50,021 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/0429ffb9b2124be289eb2810761f6bf8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/0429ffb9b2124be289eb2810761f6bf8 2024-11-27T13:23:50,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/07952e890a2b4087b05509c7624a484c is 50, key is test_row_0/B:col10/1732713828941/Put/seqid=0 2024-11-27T13:23:50,035 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 0429ffb9b2124be289eb2810761f6bf8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:50,035 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,035 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713829857; duration=0sec 2024-11-27T13:23:50,035 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:50,035 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:50,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:50,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:50,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741900_1076 (size=9857) 2024-11-27T13:23:50,069 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/07952e890a2b4087b05509c7624a484c 2024-11-27T13:23:50,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a02beae7aafc4a48897419e230ec95ef is 50, key is test_row_0/C:col10/1732713828941/Put/seqid=0 2024-11-27T13:23:50,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741901_1077 (size=9857) 2024-11-27T13:23:50,108 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a02beae7aafc4a48897419e230ec95ef 2024-11-27T13:23:50,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/d8df0d7921f44646baa613cd48cf11bd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd 2024-11-27T13:23:50,122 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd, entries=100, sequenceid=296, filesize=9.6 K 2024-11-27T13:23:50,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/07952e890a2b4087b05509c7624a484c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c 2024-11-27T13:23:50,129 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c, entries=100, sequenceid=296, filesize=9.6 K 2024-11-27T13:23:50,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a02beae7aafc4a48897419e230ec95ef as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef 2024-11-27T13:23:50,136 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef, entries=100, sequenceid=296, filesize=9.6 K 2024-11-27T13:23:50,137 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=114.05 KB/116790 for 2b5b15f41df6d1ae2583263f41ba6257 in 170ms, sequenceid=296, compaction requested=false 2024-11-27T13:23:50,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:50,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-27T13:23:50,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T13:23:50,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:50,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-27T13:23:50,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 959 msec 2024-11-27T13:23:50,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 965 msec 2024-11-27T13:23:50,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 is 50, key is test_row_0/A:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741902_1078 (size=17181) 2024-11-27T13:23:50,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 2024-11-27T13:23:50,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/694299885db7474796605084518b30d5 is 50, key is test_row_0/B:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713890174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713890179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713890183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713890183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713890183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741903_1079 (size=12301) 2024-11-27T13:23:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-27T13:23:50,285 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-27T13:23:50,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713890286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:50,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-27T13:23:50,290 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:50,291 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:50,291 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:50,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:50,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713890294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713890293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713890294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713890294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,334 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/50588092548741718b0144a36a7c3f51 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/50588092548741718b0144a36a7c3f51 2024-11-27T13:23:50,346 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 50588092548741718b0144a36a7c3f51(size=12.6 K), total size for store is 22.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:50,346 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,346 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713829857; duration=0sec 2024-11-27T13:23:50,349 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:50,349 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:50,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:50,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T13:23:50,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:50,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:50,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:50,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713890488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713890496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713890497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713890497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713890498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:50,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:50,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T13:23:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:50,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:50,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/694299885db7474796605084518b30d5 2024-11-27T13:23:50,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff186b8acd18427f84e2e230e1fc8774 is 50, key is test_row_0/C:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741904_1080 (size=12301) 2024-11-27T13:23:50,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff186b8acd18427f84e2e230e1fc8774 2024-11-27T13:23:50,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 2024-11-27T13:23:50,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6, entries=250, sequenceid=318, filesize=16.8 K 2024-11-27T13:23:50,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/694299885db7474796605084518b30d5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5 2024-11-27T13:23:50,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5, entries=150, sequenceid=318, filesize=12.0 K 2024-11-27T13:23:50,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/ff186b8acd18427f84e2e230e1fc8774 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774 2024-11-27T13:23:50,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774, entries=150, sequenceid=318, filesize=12.0 K 2024-11-27T13:23:50,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 2b5b15f41df6d1ae2583263f41ba6257 in 560ms, sequenceid=318, compaction requested=true 2024-11-27T13:23:50,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:50,698 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:50,698 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:50,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:50,699 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:50,700 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:50,700 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,700 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5b386dc35ea343a5936ec02c043b847b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=34.3 K 2024-11-27T13:23:50,700 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39987 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:50,700 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:50,700 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:50,700 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/50588092548741718b0144a36a7c3f51, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=39.0 K 2024-11-27T13:23:50,701 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b386dc35ea343a5936ec02c043b847b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:50,701 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50588092548741718b0144a36a7c3f51, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:50,702 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8df0d7921f44646baa613cd48cf11bd, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732713828940 2024-11-27T13:23:50,702 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 07952e890a2b4087b05509c7624a484c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732713828940 2024-11-27T13:23:50,702 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 694299885db7474796605084518b30d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:50,703 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ba83e4481ff46f39c5fb4cb8f1e1ca6, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:50,726 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:50,726 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/d671851a39db407cbc21fb26091bae5a is 50, key is test_row_0/B:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,734 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:50,735 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/4d767c0d57464647a463dc3b63287225 is 50, key is test_row_0/A:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,759 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:50,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-27T13:23:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,761 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:50,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:50,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741906_1082 (size=13051) 2024-11-27T13:23:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741905_1081 (size=13051) 2024-11-27T13:23:50,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9c98f196f3e0480c9cce920a7e9b7e7f is 50, key is test_row_0/A:col10/1732713830161/Put/seqid=0 2024-11-27T13:23:50,789 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/4d767c0d57464647a463dc3b63287225 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/4d767c0d57464647a463dc3b63287225 2024-11-27T13:23:50,790 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/d671851a39db407cbc21fb26091bae5a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d671851a39db407cbc21fb26091bae5a 2024-11-27T13:23:50,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:50,800 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 4d767c0d57464647a463dc3b63287225(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:50,800 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,800 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713830698; duration=0sec 2024-11-27T13:23:50,800 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:50,800 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:50,800 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:50,800 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into d671851a39db407cbc21fb26091bae5a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:50,800 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:50,800 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713830698; duration=0sec 2024-11-27T13:23:50,801 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:50,801 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:50,802 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:50,802 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:50,802 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:50,802 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/0429ffb9b2124be289eb2810761f6bf8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=34.3 K 2024-11-27T13:23:50,803 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0429ffb9b2124be289eb2810761f6bf8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732713828922 2024-11-27T13:23:50,804 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a02beae7aafc4a48897419e230ec95ef, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1732713828940 2024-11-27T13:23:50,805 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff186b8acd18427f84e2e230e1fc8774, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:50,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741907_1083 (size=12301) 2024-11-27T13:23:50,827 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9c98f196f3e0480c9cce920a7e9b7e7f 2024-11-27T13:23:50,829 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:50,831 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8f7657618b704adaaee8c0bc56af2ede is 50, key is test_row_0/C:col10/1732713830101/Put/seqid=0 2024-11-27T13:23:50,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713890832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713890835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713890837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713890838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713890839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/bae67d98c159409e8868fa5f7a53eb0d is 50, key is test_row_0/B:col10/1732713830161/Put/seqid=0 2024-11-27T13:23:50,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741908_1084 (size=13051) 2024-11-27T13:23:50,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741909_1085 (size=12301) 2024-11-27T13:23:50,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:50,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713890940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713890942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713890945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713890946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:50,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713890945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713891143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713891147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713891149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713891150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713891150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,264 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/bae67d98c159409e8868fa5f7a53eb0d 2024-11-27T13:23:51,279 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8f7657618b704adaaee8c0bc56af2ede as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8f7657618b704adaaee8c0bc56af2ede 2024-11-27T13:23:51,288 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 8f7657618b704adaaee8c0bc56af2ede(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
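The run of WARN entries above is the region refusing writes while its memstore is over the blocking limit ("Over memstore limit=512.0 K"): HRegion.checkResources throws RegionTooBusyException once the memstore exceeds the flush size times hbase.hregion.memstore.block.multiplier, and the 512 K figure comes from this test's deliberately small settings rather than the defaults. Clients normally absorb these exceptions by retrying with backoff until the in-flight flush frees memstore space. The following Java sketch shows that client-side view with standard retry settings and placeholder row/value data; it is illustrative only, not the writer this test actually uses.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Standard client retry knobs: RegionTooBusyException is retriable, so the
    // client pauses and retries up to this many times before giving up.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // base pause between retries, ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retries happen inside the client; only exhausted retries surface here.
        table.put(put);
      } catch (IOException e) {
        // The region stayed over the blocking limit for the whole retry window:
        // back off at the application level and try again later.
        System.err.println("Write rejected after retries: " + e.getMessage());
      }
    }
  }
}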
2024-11-27T13:23:51,288 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:51,288 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713830698; duration=0sec 2024-11-27T13:23:51,288 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:51,288 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/68aa49bd6cbf42f7989ba1b2c5276003 is 50, key is test_row_0/C:col10/1732713830161/Put/seqid=0 2024-11-27T13:23:51,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741910_1086 (size=12301) 2024-11-27T13:23:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:51,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713891447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713891453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713891453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713891454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713891454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,739 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/68aa49bd6cbf42f7989ba1b2c5276003 2024-11-27T13:23:51,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9c98f196f3e0480c9cce920a7e9b7e7f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f 2024-11-27T13:23:51,754 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f, entries=150, sequenceid=334, filesize=12.0 K 2024-11-27T13:23:51,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/bae67d98c159409e8868fa5f7a53eb0d as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d 2024-11-27T13:23:51,762 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d, entries=150, sequenceid=334, filesize=12.0 K 2024-11-27T13:23:51,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/68aa49bd6cbf42f7989ba1b2c5276003 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003 2024-11-27T13:23:51,770 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003, entries=150, sequenceid=334, filesize=12.0 K 2024-11-27T13:23:51,773 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 2b5b15f41df6d1ae2583263f41ba6257 in 1012ms, sequenceid=334, compaction requested=false 2024-11-27T13:23:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
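Taken together, the entries above trace one complete flush: each store's memstore is written to a temporary HFile under .tmp, the file is committed (renamed) into the store directory, HStore records "Added ... entries=150, sequenceid=334, filesize=12.0 K", and the region finally reports "Finished flush of dataSize ~80.51 KB ... in 1012ms". Such flushes are requested automatically when a memstore crosses hbase.hregion.memstore.flush.size, but they can also be forced through the Admin API. The sketch below is illustrative; the tiny flush size is in the spirit of this test rather than the production default (128 MB), and those memstore keys are server-side settings that only take effect in the region servers' hbase-site.xml or a mini-cluster configuration, not on a plain client connection.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForcedFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side settings, listed here for reference only (effective in the
    // servers' hbase-site.xml or a mini-cluster config, not via this client conf):
    // memstore size at which a flush is requested ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB, test-style
    // ... and the multiplier past which writes block with RegionTooBusyException.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Force a flush of all column families; the servers run the same
      // write-to-.tmp-then-commit sequence shown in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}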
2024-11-27T13:23:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-27T13:23:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-27T13:23:51,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-27T13:23:51,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4880 sec 2024-11-27T13:23:51,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.4930 sec 2024-11-27T13:23:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:51,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:51,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:51,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/c8872e7124cd4a02bf8715d4d3be3b2e is 50, key is test_row_0/A:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:51,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713891968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713891968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713891971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713891972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713891972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:51,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741911_1087 (size=14741) 2024-11-27T13:23:51,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/c8872e7124cd4a02bf8715d4d3be3b2e 2024-11-27T13:23:52,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/77fe60eda94346d0a5f4480af3f93bbe is 50, key is test_row_0/B:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:52,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741912_1088 (size=12301) 2024-11-27T13:23:52,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/77fe60eda94346d0a5f4480af3f93bbe 2024-11-27T13:23:52,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/84ba983dcbf442a6b3c91070895bc754 is 50, key is test_row_0/C:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:52,057 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741913_1089 (size=12301) 2024-11-27T13:23:52,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/84ba983dcbf442a6b3c91070895bc754 2024-11-27T13:23:52,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/c8872e7124cd4a02bf8715d4d3be3b2e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e 2024-11-27T13:23:52,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713892074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e, entries=200, sequenceid=360, filesize=14.4 K 2024-11-27T13:23:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/77fe60eda94346d0a5f4480af3f93bbe as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe 2024-11-27T13:23:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713892074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713892074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713892074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713892075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe, entries=150, sequenceid=360, filesize=12.0 K 2024-11-27T13:23:52,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/84ba983dcbf442a6b3c91070895bc754 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754 2024-11-27T13:23:52,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754, entries=150, sequenceid=360, filesize=12.0 K 2024-11-27T13:23:52,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 2b5b15f41df6d1ae2583263f41ba6257 in 140ms, sequenceid=360, compaction requested=true 2024-11-27T13:23:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:52,096 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-27T13:23:52,096 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:52,098 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:52,098 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:52,098 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,098 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/4d767c0d57464647a463dc3b63287225, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=39.2 K 2024-11-27T13:23:52,099 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:52,099 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:52,099 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:52,099 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d671851a39db407cbc21fb26091bae5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.8 K 2024-11-27T13:23:52,099 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d767c0d57464647a463dc3b63287225, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:52,100 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d671851a39db407cbc21fb26091bae5a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:52,100 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c98f196f3e0480c9cce920a7e9b7e7f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732713830161 2024-11-27T13:23:52,101 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:52,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8872e7124cd4a02bf8715d4d3be3b2e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:52,101 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bae67d98c159409e8868fa5f7a53eb0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732713830161 2024-11-27T13:23:52,102 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 77fe60eda94346d0a5f4480af3f93bbe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:52,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:52,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:52,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:52,118 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:52,119 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9bc831087ad942a78a2a5481e1d0842a is 50, key is test_row_0/A:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:52,126 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:52,127 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e4502befd7304960957465215e5153fb is 50, key is test_row_0/B:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:52,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741915_1091 (size=13153) 2024-11-27T13:23:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741914_1090 (size=13153) 2024-11-27T13:23:52,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T13:23:52,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:52,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:52,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:52,280 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:52,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9d18baf650834fa8a1544cafcf093699 is 50, key is test_row_0/A:col10/1732713832277/Put/seqid=0 2024-11-27T13:23:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741916_1092 (size=12301) 2024-11-27T13:23:52,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=376 (bloomFilter=true), 
to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9d18baf650834fa8a1544cafcf093699 2024-11-27T13:23:52,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713892301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713892303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713892304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713892306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713892306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/320e98c84dcf46739ace6db9a62b17ff is 50, key is test_row_0/B:col10/1732713832277/Put/seqid=0 2024-11-27T13:23:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741917_1093 (size=12301) 2024-11-27T13:23:52,330 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/320e98c84dcf46739ace6db9a62b17ff 2024-11-27T13:23:52,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/6545ad442eb54ce0bef83f4c42ab768c is 50, key is test_row_0/C:col10/1732713832277/Put/seqid=0 2024-11-27T13:23:52,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741918_1094 (size=12301) 2024-11-27T13:23:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-27T13:23:52,399 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-27T13:23:52,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-27T13:23:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:52,403 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:52,404 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:52,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:52,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713892407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713892408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713892408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713892412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713892413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:52,559 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:52,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T13:23:52,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:52,581 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e4502befd7304960957465215e5153fb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e4502befd7304960957465215e5153fb 2024-11-27T13:23:52,589 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9bc831087ad942a78a2a5481e1d0842a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9bc831087ad942a78a2a5481e1d0842a 2024-11-27T13:23:52,597 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 9bc831087ad942a78a2a5481e1d0842a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:52,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:52,597 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=13, startTime=1732713832096; duration=0sec 2024-11-27T13:23:52,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:52,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:52,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:52,598 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into e4502befd7304960957465215e5153fb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:52,598 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:52,598 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713832096; duration=0sec 2024-11-27T13:23:52,598 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:52,598 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:52,599 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:52,600 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:52,600 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,600 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8f7657618b704adaaee8c0bc56af2ede, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=36.8 K 2024-11-27T13:23:52,601 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f7657618b704adaaee8c0bc56af2ede, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732713830080 2024-11-27T13:23:52,601 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68aa49bd6cbf42f7989ba1b2c5276003, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732713830161 2024-11-27T13:23:52,602 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84ba983dcbf442a6b3c91070895bc754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:52,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713892612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,615 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#80 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:52,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713892613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,616 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/67ab94fc262046a0aed2a3a349ac541c is 50, key is test_row_0/C:col10/1732713830822/Put/seqid=0 2024-11-27T13:23:52,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713892617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713892617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713892617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741919_1095 (size=13153) 2024-11-27T13:23:52,650 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/67ab94fc262046a0aed2a3a349ac541c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/67ab94fc262046a0aed2a3a349ac541c 2024-11-27T13:23:52,659 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 67ab94fc262046a0aed2a3a349ac541c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:52,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:52,659 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=13, startTime=1732713832108; duration=0sec 2024-11-27T13:23:52,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:52,661 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:52,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:52,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T13:23:52,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:52,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:52,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:52,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/6545ad442eb54ce0bef83f4c42ab768c 2024-11-27T13:23:52,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/9d18baf650834fa8a1544cafcf093699 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699 2024-11-27T13:23:52,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699, entries=150, sequenceid=376, filesize=12.0 K 2024-11-27T13:23:52,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/320e98c84dcf46739ace6db9a62b17ff as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff 2024-11-27T13:23:52,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff, entries=150, sequenceid=376, filesize=12.0 K 2024-11-27T13:23:52,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/6545ad442eb54ce0bef83f4c42ab768c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c 2024-11-27T13:23:52,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c, entries=150, sequenceid=376, filesize=12.0 K 2024-11-27T13:23:52,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2b5b15f41df6d1ae2583263f41ba6257 in 556ms, sequenceid=376, compaction requested=false 2024-11-27T13:23:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:52,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
a0541979a851,32819,1732713812705 2024-11-27T13:23:52,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-27T13:23:52,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:52,870 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:52,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:52,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fc39e7359c8e44438f7c999ec6339c7c is 50, key is test_row_0/A:col10/1732713832298/Put/seqid=0 2024-11-27T13:23:52,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741920_1096 (size=12301) 2024-11-27T13:23:52,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:52,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713892933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713892934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713892935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713892936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:52,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713892937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713893039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713893039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713893040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713893040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713893040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713893244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713893244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713893244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713893247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713893248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,283 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fc39e7359c8e44438f7c999ec6339c7c 2024-11-27T13:23:53,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/ab11be31be5f4e3d959a1927c105ec7b is 50, key is test_row_0/B:col10/1732713832298/Put/seqid=0 2024-11-27T13:23:53,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741921_1097 (size=12301) 2024-11-27T13:23:53,299 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/ab11be31be5f4e3d959a1927c105ec7b 2024-11-27T13:23:53,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/df8db77ff14549e6af2d5fbb44ff4362 is 50, key is 
test_row_0/C:col10/1732713832298/Put/seqid=0 2024-11-27T13:23:53,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741922_1098 (size=12301) 2024-11-27T13:23:53,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713893549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713893549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713893549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713893550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713893550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:53,728 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/df8db77ff14549e6af2d5fbb44ff4362 2024-11-27T13:23:53,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fc39e7359c8e44438f7c999ec6339c7c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c 2024-11-27T13:23:53,741 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c, entries=150, sequenceid=399, filesize=12.0 K 2024-11-27T13:23:53,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/ab11be31be5f4e3d959a1927c105ec7b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b 2024-11-27T13:23:53,748 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b, entries=150, sequenceid=399, filesize=12.0 K
2024-11-27T13:23:53,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/df8db77ff14549e6af2d5fbb44ff4362 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362
2024-11-27T13:23:53,755 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362, entries=150, sequenceid=399, filesize=12.0 K
2024-11-27T13:23:53,757 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2b5b15f41df6d1ae2583263f41ba6257 in 887ms, sequenceid=399, compaction requested=true
2024-11-27T13:23:53,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257:
2024-11-27T13:23:53,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.
2024-11-27T13:23:53,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25
2024-11-27T13:23:53,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=25
2024-11-27T13:23:53,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24
2024-11-27T13:23:53,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3550 sec
2024-11-27T13:23:53,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.3600 sec
2024-11-27T13:23:54,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257
2024-11-27T13:23:54,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C
2024-11-27T13:23:54,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:23:54,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/35d58ffdb254496982021dc67086c9bc is 50, key is test_row_0/A:col10/1732713832926/Put/seqid=0
2024-11-27T13:23:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741923_1099 (size=12301)
2024-11-27T13:23:54,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/35d58ffdb254496982021dc67086c9bc
2024-11-27T13:23:54,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713894073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713894075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713894075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713894076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713894077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a18728d88bce4f428cdcec036c85b645 is 50, key is test_row_0/B:col10/1732713832926/Put/seqid=0 2024-11-27T13:23:54,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741924_1100 (size=12301) 2024-11-27T13:23:54,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713894179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713894180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713894180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713894180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713894180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713894383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713894384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713894384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713894384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713894385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a18728d88bce4f428cdcec036c85b645 2024-11-27T13:23:54,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/b3e9bc9b52ca464e8abdf20bb4618a0f is 50, key is test_row_0/C:col10/1732713832926/Put/seqid=0 2024-11-27T13:23:54,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-27T13:23:54,507 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-27T13:23:54,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:54,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-27T13:23:54,511 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:54,512 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T13:23:54,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T13:23:54,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-27T13:23:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741925_1101 (size=12301)
2024-11-27T13:23:54,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-27T13:23:54,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705
2024-11-27T13:23:54,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27
2024-11-27T13:23:54,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.
2024-11-27T13:23:54,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing
2024-11-27T13:23:54,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.
2024-11-27T13:23:54,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27
java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T13:23:54,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:54,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:54,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713894686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713894687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713894688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713894689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:54,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713894691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:54,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T13:23:54,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:54,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-27T13:23:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:54,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:54,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:54,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:54,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:54,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/b3e9bc9b52ca464e8abdf20bb4618a0f 2024-11-27T13:23:54,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/35d58ffdb254496982021dc67086c9bc as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc 2024-11-27T13:23:54,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc, entries=150, sequenceid=416, filesize=12.0 K 2024-11-27T13:23:54,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a18728d88bce4f428cdcec036c85b645 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645 2024-11-27T13:23:54,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645, entries=150, sequenceid=416, filesize=12.0 K 2024-11-27T13:23:54,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/b3e9bc9b52ca464e8abdf20bb4618a0f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f 2024-11-27T13:23:54,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f, entries=150, sequenceid=416, filesize=12.0 K 2024-11-27T13:23:54,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 2b5b15f41df6d1ae2583263f41ba6257 in 898ms, sequenceid=416, compaction requested=true 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:54,955 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:54,956 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:54,958 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:54,958 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:54,958 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:54,958 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9bc831087ad942a78a2a5481e1d0842a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=48.9 K 2024-11-27T13:23:54,959 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:54,959 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bc831087ad942a78a2a5481e1d0842a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:54,959 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:54,959 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:54,959 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e4502befd7304960957465215e5153fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=48.9 K 2024-11-27T13:23:54,960 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d18baf650834fa8a1544cafcf093699, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732713831966 2024-11-27T13:23:54,960 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e4502befd7304960957465215e5153fb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:54,960 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc39e7359c8e44438f7c999ec6339c7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732713832298 2024-11-27T13:23:54,961 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 320e98c84dcf46739ace6db9a62b17ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732713831966 2024-11-27T13:23:54,961 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35d58ffdb254496982021dc67086c9bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732713832926 2024-11-27T13:23:54,961 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ab11be31be5f4e3d959a1927c105ec7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732713832298 2024-11-27T13:23:54,963 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a18728d88bce4f428cdcec036c85b645, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732713832926 2024-11-27T13:23:54,972 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:54,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-27T13:23:54,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:54,973 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-27T13:23:54,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:54,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:54,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:54,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:54,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:54,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:54,984 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#87 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:54,987 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/8093ed0786814a2e8ee00b437bb03969 is 50, key is test_row_0/A:col10/1732713832926/Put/seqid=0 2024-11-27T13:23:54,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cdc4acb85c0444fe89d4c61e0d551a47 is 50, key is test_row_0/A:col10/1732713834075/Put/seqid=0 2024-11-27T13:23:54,998 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:54,999 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a6c051565d40420a889ec441e3e29d0a is 50, key is test_row_0/B:col10/1732713832926/Put/seqid=0 2024-11-27T13:23:55,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741927_1103 (size=12301) 2024-11-27T13:23:55,055 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cdc4acb85c0444fe89d4c61e0d551a47 2024-11-27T13:23:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741926_1102 (size=13289) 2024-11-27T13:23:55,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/54264c569ad840b288800ca4d5a3a8c0 is 50, key is test_row_0/B:col10/1732713834075/Put/seqid=0 2024-11-27T13:23:55,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741928_1104 (size=13289) 2024-11-27T13:23:55,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741929_1105 (size=12301) 2024-11-27T13:23:55,086 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/54264c569ad840b288800ca4d5a3a8c0 2024-11-27T13:23:55,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/446e8414100749949940a009abda11d7 is 50, key is test_row_0/C:col10/1732713834075/Put/seqid=0 2024-11-27T13:23:55,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741930_1106 (size=12301) 2024-11-27T13:23:55,104 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/446e8414100749949940a009abda11d7 2024-11-27T13:23:55,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/cdc4acb85c0444fe89d4c61e0d551a47 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47 2024-11-27T13:23:55,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T13:23:55,119 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47, entries=150, sequenceid=435, filesize=12.0 K 2024-11-27T13:23:55,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/54264c569ad840b288800ca4d5a3a8c0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0 2024-11-27T13:23:55,130 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0, entries=150, sequenceid=435, filesize=12.0 K 2024-11-27T13:23:55,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/446e8414100749949940a009abda11d7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7 2024-11-27T13:23:55,175 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7, entries=150, sequenceid=435, filesize=12.0 K 2024-11-27T13:23:55,176 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 2b5b15f41df6d1ae2583263f41ba6257 in 204ms, sequenceid=435, compaction requested=true 2024-11-27T13:23:55,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:55,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-27T13:23:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-27T13:23:55,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-27T13:23:55,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 665 msec 2024-11-27T13:23:55,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 671 msec 2024-11-27T13:23:55,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:55,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:23:55,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:55,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:55,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:55,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0b442d7df5c84bb68b90893bc3c44719 is 50, key is test_row_0/A:col10/1732713835199/Put/seqid=0 2024-11-27T13:23:55,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713895238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713895240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713895242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713895243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713895244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741931_1107 (size=17181) 2024-11-27T13:23:55,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0b442d7df5c84bb68b90893bc3c44719 2024-11-27T13:23:55,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/46d161d925544eeebca5371a3643287b is 50, key is test_row_0/B:col10/1732713835199/Put/seqid=0 2024-11-27T13:23:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741932_1108 (size=12301) 2024-11-27T13:23:55,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/46d161d925544eeebca5371a3643287b 2024-11-27T13:23:55,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/07a5210f21584275aac37e90d5662d46 is 50, key is test_row_0/C:col10/1732713835199/Put/seqid=0 2024-11-27T13:23:55,313 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741933_1109 (size=12301) 2024-11-27T13:23:55,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/07a5210f21584275aac37e90d5662d46 2024-11-27T13:23:55,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/0b442d7df5c84bb68b90893bc3c44719 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719 2024-11-27T13:23:55,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719, entries=250, sequenceid=446, filesize=16.8 K 2024-11-27T13:23:55,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/46d161d925544eeebca5371a3643287b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b 2024-11-27T13:23:55,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b, entries=150, sequenceid=446, filesize=12.0 K 2024-11-27T13:23:55,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/07a5210f21584275aac37e90d5662d46 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46 2024-11-27T13:23:55,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713895346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713895347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713895348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713895348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713895348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46, entries=150, sequenceid=446, filesize=12.0 K 2024-11-27T13:23:55,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 2b5b15f41df6d1ae2583263f41ba6257 in 150ms, sequenceid=446, compaction requested=true 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-27T13:23:55,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:55,353 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T13:23:55,474 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/8093ed0786814a2e8ee00b437bb03969 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8093ed0786814a2e8ee00b437bb03969 2024-11-27T13:23:55,486 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/a6c051565d40420a889ec441e3e29d0a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6c051565d40420a889ec441e3e29d0a 2024-11-27T13:23:55,498 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into a6c051565d40420a889ec441e3e29d0a(size=13.0 K), total size for store is 37.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:55,498 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,499 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=12, startTime=1732713834955; duration=0sec 2024-11-27T13:23:55,499 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-27T13:23:55,499 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:55,500 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-27T13:23:55,501 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T13:23:55,501 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T13:23:55,501 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
because compaction request was cancelled 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-27T13:23:55,502 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 8093ed0786814a2e8ee00b437bb03969(size=13.0 K), total size for store is 41.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,502 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=12, startTime=1732713834955; duration=0sec 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:55,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:55,506 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74658 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 6 compacting, 0 eligible, 16 blocking 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T13:23:55,507 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
because compaction request was cancelled 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:55,507 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:23:55,507 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/67ab94fc262046a0aed2a3a349ac541c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=72.9 K 2024-11-27T13:23:55,508 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:23:55,508 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:55,509 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:55,509 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6c051565d40420a889ec441e3e29d0a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=37.0 K 2024-11-27T13:23:55,509 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6c051565d40420a889ec441e3e29d0a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732713832926 2024-11-27T13:23:55,510 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54264c569ad840b288800ca4d5a3a8c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732713834069 2024-11-27T13:23:55,510 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46d161d925544eeebca5371a3643287b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713835199 2024-11-27T13:23:55,514 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 67ab94fc262046a0aed2a3a349ac541c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732713830822 2024-11-27T13:23:55,516 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6545ad442eb54ce0bef83f4c42ab768c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732713831966 2024-11-27T13:23:55,519 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting df8db77ff14549e6af2d5fbb44ff4362, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732713832298 2024-11-27T13:23:55,520 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b3e9bc9b52ca464e8abdf20bb4618a0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732713832926 2024-11-27T13:23:55,520 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 446e8414100749949940a009abda11d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732713834069 2024-11-27T13:23:55,521 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 07a5210f21584275aac37e90d5662d46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713835199 2024-11-27T13:23:55,530 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:55,531 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/1502f0c2198b48a586222569dfe29dec is 50, key is test_row_0/B:col10/1732713835199/Put/seqid=0 2024-11-27T13:23:55,546 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#96 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:55,547 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9b723e219c40483890a9c1abf156ec95 is 50, key is test_row_0/C:col10/1732713835199/Put/seqid=0 2024-11-27T13:23:55,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T13:23:55,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:55,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:55,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:55,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:55,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713895559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713895560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713895561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713895564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713895564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/54e3b619b39a482a8155365da3c11c58 is 50, key is test_row_0/A:col10/1732713835241/Put/seqid=0 2024-11-27T13:23:55,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741934_1110 (size=13391) 2024-11-27T13:23:55,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741935_1111 (size=13357) 2024-11-27T13:23:55,602 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/9b723e219c40483890a9c1abf156ec95 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9b723e219c40483890a9c1abf156ec95 2024-11-27T13:23:55,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741936_1112 (size=12301) 2024-11-27T13:23:55,615 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 9b723e219c40483890a9c1abf156ec95(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:55,615 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,615 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=10, startTime=1732713835353; duration=0sec 2024-11-27T13:23:55,615 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:55,615 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-27T13:23:55,618 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-27T13:23:55,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:55,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/54e3b619b39a482a8155365da3c11c58 2024-11-27T13:23:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-27T13:23:55,625 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:55,626 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:55,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:55,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T13:23:55,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/75f1242bc8d248ad9f7aacf3e43318da is 50, key is test_row_0/B:col10/1732713835241/Put/seqid=0 2024-11-27T13:23:55,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741937_1113 (size=12301) 2024-11-27T13:23:55,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=51.44 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/75f1242bc8d248ad9f7aacf3e43318da 2024-11-27T13:23:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713895665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713895666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713895668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713895672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713895672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8fad37c81fae4b81a646cc01986d4751 is 50, key is test_row_0/C:col10/1732713835241/Put/seqid=0 2024-11-27T13:23:55,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741938_1114 (size=12301) 2024-11-27T13:23:55,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8fad37c81fae4b81a646cc01986d4751 2024-11-27T13:23:55,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/54e3b619b39a482a8155365da3c11c58 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58 2024-11-27T13:23:55,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58, entries=150, sequenceid=474, filesize=12.0 K 2024-11-27T13:23:55,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/75f1242bc8d248ad9f7aacf3e43318da as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da 2024-11-27T13:23:55,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da, entries=150, sequenceid=474, filesize=12.0 K 2024-11-27T13:23:55,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/8fad37c81fae4b81a646cc01986d4751 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751 2024-11-27T13:23:55,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751, entries=150, sequenceid=474, filesize=12.0 K 2024-11-27T13:23:55,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 2b5b15f41df6d1ae2583263f41ba6257 in 162ms, sequenceid=474, compaction requested=true 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,716 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:55,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:55,718 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55072 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:23:55,718 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:55,718 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:55,718 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8093ed0786814a2e8ee00b437bb03969, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=53.8 K 2024-11-27T13:23:55,719 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8093ed0786814a2e8ee00b437bb03969, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732713832926 2024-11-27T13:23:55,719 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cdc4acb85c0444fe89d4c61e0d551a47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732713834069 2024-11-27T13:23:55,720 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b442d7df5c84bb68b90893bc3c44719, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713835192 2024-11-27T13:23:55,720 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 54e3b619b39a482a8155365da3c11c58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732713835237 2024-11-27T13:23:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T13:23:55,731 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#100 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:55,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/01f5e9061008417f807130c6d7da9549 is 50, key is test_row_0/A:col10/1732713835241/Put/seqid=0 2024-11-27T13:23:55,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741939_1115 (size=13425) 2024-11-27T13:23:55,754 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/01f5e9061008417f807130c6d7da9549 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/01f5e9061008417f807130c6d7da9549 2024-11-27T13:23:55,761 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into 01f5e9061008417f807130c6d7da9549(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:55,761 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,761 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=12, startTime=1732713835716; duration=0sec 2024-11-27T13:23:55,761 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:23:55,761 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:55,761 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-27T13:23:55,762 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T13:23:55,762 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T13:23:55,762 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
because compaction request was cancelled 2024-11-27T13:23:55,762 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:55,762 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T13:23:55,763 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T13:23:55,763 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T13:23:55,763 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. because compaction request was cancelled 2024-11-27T13:23:55,763 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:55,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:55,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-27T13:23:55,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:55,782 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T13:23:55,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:55,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:55,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fdff8ad491b4498996fbc570a0b6e39e is 50, key is test_row_0/A:col10/1732713835562/Put/seqid=0 2024-11-27T13:23:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741940_1116 (size=12301) 2024-11-27T13:23:55,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:55,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:55,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713895901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713895900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713895902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713895903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713895903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:55,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T13:23:55,981 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/1502f0c2198b48a586222569dfe29dec as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1502f0c2198b48a586222569dfe29dec 2024-11-27T13:23:55,993 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 1502f0c2198b48a586222569dfe29dec(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
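The repeated RegionTooBusyException warnings in this stretch are the region server's write back-pressure: while the region's memstore is above its blocking limit (512.0 K in this test configuration), HRegion.checkResources rejects incoming mutations and the client is expected to back off and retry until the in-flight flush drains the memstore. A simplified sketch of such a guard follows; the class, field, and callback names and the hard-coded limit are illustrative assumptions, not the actual HRegion implementation.

import java.util.concurrent.atomic.AtomicLong;

/** Illustrative sketch only: memstore back-pressure for incoming writes. */
final class MemstoreGuard {
    // Assumed blocking limit, mirroring "Over memstore limit=512.0 K" in the log.
    private static final long BLOCKING_LIMIT_BYTES = 512L * 1024;

    private final AtomicLong memstoreSizeBytes = new AtomicLong();
    private final Runnable requestFlush;

    MemstoreGuard(Runnable requestFlush) {
        this.requestFlush = requestFlush;
    }

    /** Called before applying a mutation; pushes back while over the limit. */
    void checkResources(String regionName) {
        if (memstoreSizeBytes.get() > BLOCKING_LIMIT_BYTES) {
            requestFlush.run(); // ask for a flush before rejecting the write
            // Stand-in for RegionTooBusyException, which is what the server returns here.
            throw new IllegalStateException(
                "Over memstore limit=" + BLOCKING_LIMIT_BYTES + ", regionName=" + regionName);
        }
    }

    void add(long bytes) { memstoreSizeBytes.addAndGet(bytes); }
}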
2024-11-27T13:23:55,993 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:55,994 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=13, startTime=1732713835353; duration=0sec 2024-11-27T13:23:55,994 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:55,994 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:23:56,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713896005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713896005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713896008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713896008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713896008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,203 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fdff8ad491b4498996fbc570a0b6e39e 2024-11-27T13:23:56,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713896207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713896209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713896210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713896211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713896211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 is 50, key is test_row_0/B:col10/1732713835562/Put/seqid=0 2024-11-27T13:23:56,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741941_1117 (size=12301) 2024-11-27T13:23:56,219 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 2024-11-27T13:23:56,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/2e5b55b104124fc78ac9a6dfcae4371b is 50, key is test_row_0/C:col10/1732713835562/Put/seqid=0 2024-11-27T13:23:56,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T13:23:56,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741942_1118 (size=12301) 2024-11-27T13:23:56,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713896509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713896512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713896513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713896514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:56,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713896514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:56,634 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/2e5b55b104124fc78ac9a6dfcae4371b 2024-11-27T13:23:56,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/fdff8ad491b4498996fbc570a0b6e39e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e 2024-11-27T13:23:56,652 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e, entries=150, sequenceid=486, filesize=12.0 K 2024-11-27T13:23:56,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 2024-11-27T13:23:56,659 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0, entries=150, sequenceid=486, filesize=12.0 K 2024-11-27T13:23:56,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/2e5b55b104124fc78ac9a6dfcae4371b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b 2024-11-27T13:23:56,668 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b, entries=150, sequenceid=486, filesize=12.0 K 2024-11-27T13:23:56,669 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 2b5b15f41df6d1ae2583263f41ba6257 in 887ms, sequenceid=486, compaction requested=true 2024-11-27T13:23:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-27T13:23:56,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-27T13:23:56,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-27T13:23:56,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0440 sec 2024-11-27T13:23:56,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.0530 sec 2024-11-27T13:23:56,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-27T13:23:56,731 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-27T13:23:56,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:56,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-27T13:23:56,734 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:56,736 INFO 
[PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:56,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:56,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T13:23:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T13:23:56,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:56,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:56,888 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:56,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/79f09da0154942b6b633ab50509838df is 50, key is test_row_0/A:col10/1732713835902/Put/seqid=0 2024-11-27T13:23:56,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741943_1119 (size=12301) 
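The flush sequence running through these entries follows the usual write-then-commit pattern: each column family's snapshot is first written as a new HFile under the region's .tmp directory (DefaultStoreFlusher), then moved into the family directory (the HRegionFileSystem "Committing ... as ..." lines) and registered with the store, so readers only ever see complete files. Below is a minimal, illustrative sketch of that commit step against the Hadoop FileSystem API; the helper name is an assumption, and only the path shape is taken from the log, not HRegionFileSystem's actual logic.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative sketch only: commit a flushed HFile from .tmp into the family dir. */
final class TmpThenCommit {
    /**
     * Moves a finished flush output from the region's .tmp area into the column
     * family directory, e.g. .tmp/A/fdff8ad491b4498996fbc570a0b6e39e -> A/... as
     * in the log above. A rename within HDFS is atomic, so readers observe either
     * no file or the whole file, never a partially written one.
     */
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dest = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + dest);
        }
        return dest;
    }
}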
2024-11-27T13:23:57,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:57,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:57,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713897020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713897021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713897021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713897022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713897023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T13:23:57,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713897125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713897125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713897126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,318 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/79f09da0154942b6b633ab50509838df 2024-11-27T13:23:57,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713897329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713897329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/5d7b555ed0d445efa3464851f373b52d is 50, key is test_row_0/B:col10/1732713835902/Put/seqid=0 2024-11-27T13:23:57,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713897331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741944_1120 (size=12301) 2024-11-27T13:23:57,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T13:23:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713897632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713897632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:57,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713897635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:57,742 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/5d7b555ed0d445efa3464851f373b52d 2024-11-27T13:23:57,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/5c18c50430bb496c8cb0ec81d53fbe49 is 50, key is test_row_0/C:col10/1732713835902/Put/seqid=0 2024-11-27T13:23:57,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741945_1121 (size=12301) 2024-11-27T13:23:57,758 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/5c18c50430bb496c8cb0ec81d53fbe49 2024-11-27T13:23:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/79f09da0154942b6b633ab50509838df as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df 2024-11-27T13:23:57,770 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df, entries=150, sequenceid=513, filesize=12.0 K 2024-11-27T13:23:57,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/5d7b555ed0d445efa3464851f373b52d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d 2024-11-27T13:23:57,778 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d, entries=150, sequenceid=513, filesize=12.0 K 2024-11-27T13:23:57,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/5c18c50430bb496c8cb0ec81d53fbe49 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49 2024-11-27T13:23:57,784 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49, entries=150, sequenceid=513, filesize=12.0 K 2024-11-27T13:23:57,785 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 2b5b15f41df6d1ae2583263f41ba6257 in 897ms, sequenceid=513, compaction requested=true 2024-11-27T13:23:57,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:57,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:57,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-27T13:23:57,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-27T13:23:57,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-27T13:23:57,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0510 sec 2024-11-27T13:23:57,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.0580 sec 2024-11-27T13:23:57,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-27T13:23:57,840 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-27T13:23:57,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-27T13:23:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T13:23:57,843 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:57,843 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:57,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:57,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T13:23:57,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:57,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-27T13:23:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:57,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T13:23:57,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:57,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:57,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:57,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:57,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:57,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:58,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/41d639d1381549f8bdc404d763d694f9 is 50, key is test_row_0/A:col10/1732713837020/Put/seqid=0 2024-11-27T13:23:58,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741946_1122 (size=9857) 2024-11-27T13:23:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:58,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713898069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713898071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713898137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713898138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713898138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T13:23:58,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713898173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713898174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713898375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713898378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,411 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/41d639d1381549f8bdc404d763d694f9 2024-11-27T13:23:58,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/1de22228ce5e474fa701eb374cdf6f5a is 50, key is test_row_0/B:col10/1732713837020/Put/seqid=0 2024-11-27T13:23:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741947_1123 (size=9857) 2024-11-27T13:23:58,443 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/1de22228ce5e474fa701eb374cdf6f5a 2024-11-27T13:23:58,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T13:23:58,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/01b830598ac74ee99e2517e8b5dab7a8 is 50, key is test_row_0/C:col10/1732713837020/Put/seqid=0 2024-11-27T13:23:58,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741948_1124 (size=9857) 2024-11-27T13:23:58,466 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=523 (bloomFilter=true), 
to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/01b830598ac74ee99e2517e8b5dab7a8 2024-11-27T13:23:58,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/41d639d1381549f8bdc404d763d694f9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9 2024-11-27T13:23:58,478 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9, entries=100, sequenceid=523, filesize=9.6 K 2024-11-27T13:23:58,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/1de22228ce5e474fa701eb374cdf6f5a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a 2024-11-27T13:23:58,484 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a, entries=100, sequenceid=523, filesize=9.6 K 2024-11-27T13:23:58,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/01b830598ac74ee99e2517e8b5dab7a8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8 2024-11-27T13:23:58,490 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8, entries=100, sequenceid=523, filesize=9.6 K 2024-11-27T13:23:58,491 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 2b5b15f41df6d1ae2583263f41ba6257 in 495ms, sequenceid=523, compaction requested=true 2024-11-27T13:23:58,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 
2024-11-27T13:23:58,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:58,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-27T13:23:58,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-27T13:23:58,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-27T13:23:58,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 650 msec 2024-11-27T13:23:58,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 654 msec 2024-11-27T13:23:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:58,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:58,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:58,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/805acbc8d8004ef69f87448daf13563f is 50, key is test_row_0/A:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:58,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713898713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713898714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741949_1125 (size=14741) 2024-11-27T13:23:58,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713898815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713898816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:58,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-27T13:23:58,946 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-27T13:23:58,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:23:58,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-27T13:23:58,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:23:58,949 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:23:58,950 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:23:58,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:23:59,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713899017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713899018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:23:59,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:59,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-27T13:23:59,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:59,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:59,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:59,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:23:59,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:59,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:23:59,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/805acbc8d8004ef69f87448daf13563f 2024-11-27T13:23:59,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/cc1b507524d14c48bfc48ec91ad4f014 is 50, key is test_row_0/B:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:59,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44622 deadline: 1732713899141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44602 deadline: 1732713899144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44620 deadline: 1732713899150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741950_1126 (size=12301) 2024-11-27T13:23:59,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/cc1b507524d14c48bfc48ec91ad4f014 2024-11-27T13:23:59,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/49157907ca264ddfbf0dd1f4501292ac is 50, key is test_row_0/C:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:59,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741951_1127 (size=12301) 2024-11-27T13:23:59,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=551 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/49157907ca264ddfbf0dd1f4501292ac 2024-11-27T13:23:59,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/805acbc8d8004ef69f87448daf13563f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f 2024-11-27T13:23:59,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f, entries=200, sequenceid=551, filesize=14.4 K 2024-11-27T13:23:59,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/cc1b507524d14c48bfc48ec91ad4f014 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014 2024-11-27T13:23:59,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014, entries=150, sequenceid=551, filesize=12.0 K 2024-11-27T13:23:59,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/49157907ca264ddfbf0dd1f4501292ac as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac 2024-11-27T13:23:59,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac, entries=150, sequenceid=551, filesize=12.0 K 2024-11-27T13:23:59,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 2b5b15f41df6d1ae2583263f41ba6257 in 514ms, sequenceid=551, compaction requested=true 2024-11-27T13:23:59,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:59,219 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:23:59,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:23:59,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:59,220 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-27T13:23:59,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:23:59,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:59,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:23:59,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:59,222 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 5 files of size 62625 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:23:59,222 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/A is initiating minor compaction (all files) 2024-11-27T13:23:59,222 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/A in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:59,222 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/01f5e9061008417f807130c6d7da9549, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=61.2 K 2024-11-27T13:23:59,222 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72452 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-27T13:23:59,222 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/B is initiating minor compaction (all files) 2024-11-27T13:23:59,223 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/B in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:59,223 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1502f0c2198b48a586222569dfe29dec, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=70.8 K 2024-11-27T13:23:59,223 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01f5e9061008417f807130c6d7da9549, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732713835237 2024-11-27T13:23:59,223 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1502f0c2198b48a586222569dfe29dec, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713835199 2024-11-27T13:23:59,224 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdff8ad491b4498996fbc570a0b6e39e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732713835554 2024-11-27T13:23:59,224 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 75f1242bc8d248ad9f7aacf3e43318da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732713835237 2024-11-27T13:23:59,224 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79f09da0154942b6b633ab50509838df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732713835899 2024-11-27T13:23:59,226 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting dcbdc597fc9c476b9e5bb3ecd9ded7f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732713835554 2024-11-27T13:23:59,226 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41d639d1381549f8bdc404d763d694f9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732713837020 2024-11-27T13:23:59,227 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d7b555ed0d445efa3464851f373b52d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732713835899 
2024-11-27T13:23:59,227 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 805acbc8d8004ef69f87448daf13563f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1732713838054 2024-11-27T13:23:59,227 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1de22228ce5e474fa701eb374cdf6f5a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732713837020 2024-11-27T13:23:59,228 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cc1b507524d14c48bfc48ec91ad4f014, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1732713838067 2024-11-27T13:23:59,250 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#A#compaction#113 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:59,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:23:59,251 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/c8e0c620e5974dd0bfbca2e2722908aa is 50, key is test_row_0/A:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:59,255 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:23:59,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:23:59,256 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:23:59,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:23:59,258 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#B#compaction#114 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:59,261 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/453ca59ec7ce41c1ad0dc109b6c5ec09 is 50, key is test_row_0/B:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:59,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bcf29f38d2c242328892e3ee51ec0b0e is 50, key is test_row_0/A:col10/1732713838713/Put/seqid=0 2024-11-27T13:23:59,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741952_1128 (size=13595) 2024-11-27T13:23:59,295 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/c8e0c620e5974dd0bfbca2e2722908aa as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8e0c620e5974dd0bfbca2e2722908aa 2024-11-27T13:23:59,302 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/A of 2b5b15f41df6d1ae2583263f41ba6257 into c8e0c620e5974dd0bfbca2e2722908aa(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:23:59,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:59,302 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/A, priority=11, startTime=1732713839219; duration=0sec 2024-11-27T13:23:59,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:23:59,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:23:59,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-27T13:23:59,304 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72418 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-27T13:23:59,304 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 2b5b15f41df6d1ae2583263f41ba6257/C is initiating minor compaction (all files) 2024-11-27T13:23:59,304 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2b5b15f41df6d1ae2583263f41ba6257/C in TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:23:59,305 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9b723e219c40483890a9c1abf156ec95, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp, totalSize=70.7 K 2024-11-27T13:23:59,305 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b723e219c40483890a9c1abf156ec95, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713835199 2024-11-27T13:23:59,306 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 8fad37c81fae4b81a646cc01986d4751, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732713835237 2024-11-27T13:23:59,307 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e5b55b104124fc78ac9a6dfcae4371b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732713835554 2024-11-27T13:23:59,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741954_1130 (size=9857) 2024-11-27T13:23:59,309 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c18c50430bb496c8cb0ec81d53fbe49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732713835899 2024-11-27T13:23:59,309 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01b830598ac74ee99e2517e8b5dab7a8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1732713837020 2024-11-27T13:23:59,315 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49157907ca264ddfbf0dd1f4501292ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=551, earliestPutTs=1732713838067 2024-11-27T13:23:59,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741953_1129 (size=13595) 2024-11-27T13:23:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:23:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. as already flushing 2024-11-27T13:23:59,339 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2b5b15f41df6d1ae2583263f41ba6257#C#compaction#116 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:23:59,339 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/85511a4bc4c349c1922735d8f2377460 is 50, key is test_row_0/C:col10/1732713838067/Put/seqid=0 2024-11-27T13:23:59,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741955_1131 (size=13561) 2024-11-27T13:23:59,349 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/85511a4bc4c349c1922735d8f2377460 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/85511a4bc4c349c1922735d8f2377460 2024-11-27T13:23:59,364 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/C of 2b5b15f41df6d1ae2583263f41ba6257 into 85511a4bc4c349c1922735d8f2377460(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:59,364 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:59,364 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/C, priority=10, startTime=1732713839220; duration=0sec 2024-11-27T13:23:59,364 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:59,364 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:23:59,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713899392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713899395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713899497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713899499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:23:59,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713899699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:23:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713899703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:23:59,710 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bcf29f38d2c242328892e3ee51ec0b0e 2024-11-27T13:23:59,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/fe9c17ea3bcd405b9e515e7fdb7855cd is 50, key is test_row_0/B:col10/1732713838713/Put/seqid=0 2024-11-27T13:23:59,721 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:59011 2024-11-27T13:23:59,721 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:59011 2024-11-27T13:23:59,721 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:59011 2024-11-27T13:23:59,721 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:59011 2024-11-27T13:23:59,722 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:23:59,722 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:23:59,722 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:23:59,722 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:23:59,726 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/453ca59ec7ce41c1ad0dc109b6c5ec09 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/453ca59ec7ce41c1ad0dc109b6c5ec09 2024-11-27T13:23:59,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741956_1132 (size=9857) 2024-11-27T13:23:59,732 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 2b5b15f41df6d1ae2583263f41ba6257/B of 2b5b15f41df6d1ae2583263f41ba6257 into 453ca59ec7ce41c1ad0dc109b6c5ec09(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:23:59,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:23:59,732 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257., storeName=2b5b15f41df6d1ae2583263f41ba6257/B, priority=10, startTime=1732713839219; duration=0sec 2024-11-27T13:23:59,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:23:59,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:24:00,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713900003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:00,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713900005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:24:00,130 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/fe9c17ea3bcd405b9e515e7fdb7855cd 2024-11-27T13:24:00,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a5e5fb5974c74c729dec4140422ce726 is 50, key is test_row_0/C:col10/1732713838713/Put/seqid=0 2024-11-27T13:24:00,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741957_1133 (size=9857) 2024-11-27T13:24:00,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:00,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44600 deadline: 1732713900509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:00,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44650 deadline: 1732713900509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:00,542 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=559 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a5e5fb5974c74c729dec4140422ce726 2024-11-27T13:24:00,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/bcf29f38d2c242328892e3ee51ec0b0e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bcf29f38d2c242328892e3ee51ec0b0e 2024-11-27T13:24:00,551 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bcf29f38d2c242328892e3ee51ec0b0e, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T13:24:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/fe9c17ea3bcd405b9e515e7fdb7855cd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/fe9c17ea3bcd405b9e515e7fdb7855cd 2024-11-27T13:24:00,555 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/fe9c17ea3bcd405b9e515e7fdb7855cd, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T13:24:00,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/a5e5fb5974c74c729dec4140422ce726 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a5e5fb5974c74c729dec4140422ce726 2024-11-27T13:24:00,560 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a5e5fb5974c74c729dec4140422ce726, entries=100, sequenceid=559, filesize=9.6 K 2024-11-27T13:24:00,561 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 2b5b15f41df6d1ae2583263f41ba6257 in 1305ms, sequenceid=559, compaction requested=false 2024-11-27T13:24:00,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:24:00,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
2024-11-27T13:24:00,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-27T13:24:00,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-27T13:24:00,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-27T13:24:00,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6120 sec 2024-11-27T13:24:00,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.6170 sec 2024-11-27T13:24:01,052 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T13:24:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-27T13:24:01,053 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-27T13:24:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:24:01,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-27T13:24:01,158 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:59011 2024-11-27T13:24:01,158 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:59011 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:24:01,158 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,158 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:24:01,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:01,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b50a4586a5ea4637b5dc3e04e59099de is 50, key is test_row_0/A:col10/1732713841157/Put/seqid=0 
2024-11-27T13:24:01,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741958_1134 (size=12301) 2024-11-27T13:24:01,172 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:59011 2024-11-27T13:24:01,172 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,512 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:59011 2024-11-27T13:24:01,512 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,515 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:59011 2024-11-27T13:24:01,515 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 109 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5430 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5521 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2415 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7240 rows 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2413 2024-11-27T13:24:01,515 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7238 rows 2024-11-27T13:24:01,515 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:24:01,515 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:59011 2024-11-27T13:24:01,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:01,519 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:24:01,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:24:01,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:01,532 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713841531"}]},"ts":"1732713841531"} 2024-11-27T13:24:01,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 
2024-11-27T13:24:01,533 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:24:01,537 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:24:01,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:24:01,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, UNASSIGN}] 2024-11-27T13:24:01,544 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, UNASSIGN 2024-11-27T13:24:01,545 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=2b5b15f41df6d1ae2583263f41ba6257, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:01,546 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:24:01,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:01,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b50a4586a5ea4637b5dc3e04e59099de 2024-11-27T13:24:01,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6ca0e05fd9cb4cd999b1d31880749dd8 is 50, key is test_row_0/B:col10/1732713841157/Put/seqid=0 2024-11-27T13:24:01,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741959_1135 (size=12301) 2024-11-27T13:24:01,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-27T13:24:01,701 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:01,702 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:24:01,702 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:24:01,703 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 2b5b15f41df6d1ae2583263f41ba6257, disabling compactions & flushes 2024-11-27T13:24:01,703 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 
{event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:24:01,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-27T13:24:01,981 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6ca0e05fd9cb4cd999b1d31880749dd8 2024-11-27T13:24:01,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/dbeffea9814b4bf2adba7a4fefbb0e8f is 50, key is test_row_0/C:col10/1732713841157/Put/seqid=0 2024-11-27T13:24:01,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741960_1136 (size=12301) 2024-11-27T13:24:02,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-27T13:24:02,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=592 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/dbeffea9814b4bf2adba7a4fefbb0e8f 2024-11-27T13:24:02,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/b50a4586a5ea4637b5dc3e04e59099de as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b50a4586a5ea4637b5dc3e04e59099de 2024-11-27T13:24:02,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b50a4586a5ea4637b5dc3e04e59099de, entries=150, sequenceid=592, filesize=12.0 K 2024-11-27T13:24:02,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/6ca0e05fd9cb4cd999b1d31880749dd8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6ca0e05fd9cb4cd999b1d31880749dd8 2024-11-27T13:24:02,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6ca0e05fd9cb4cd999b1d31880749dd8, entries=150, sequenceid=592, filesize=12.0 K 2024-11-27T13:24:02,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/dbeffea9814b4bf2adba7a4fefbb0e8f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/dbeffea9814b4bf2adba7a4fefbb0e8f 2024-11-27T13:24:02,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/dbeffea9814b4bf2adba7a4fefbb0e8f, entries=150, sequenceid=592, filesize=12.0 K 2024-11-27T13:24:02,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 2b5b15f41df6d1ae2583263f41ba6257 in 1258ms, sequenceid=592, compaction requested=true 2024-11-27T13:24:02,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:24:02,416 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. because compaction request was cancelled 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. after waiting 0 ms 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:A 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 
because compaction request was cancelled 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:B 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2b5b15f41df6d1ae2583263f41ba6257:C, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. because compaction request was cancelled 2024-11-27T13:24:02,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:02,416 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2b5b15f41df6d1ae2583263f41ba6257:C 2024-11-27T13:24:02,416 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 2b5b15f41df6d1ae2583263f41ba6257 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=A 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=B 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2b5b15f41df6d1ae2583263f41ba6257, store=C 2024-11-27T13:24:02,416 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:02,420 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/2378d0a1b69e428380420a022a4415f5 is 50, key is test_row_1/A:col10/1732713841511/Put/seqid=0 2024-11-27T13:24:02,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741961_1137 (size=9857) 2024-11-27T13:24:02,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=36 2024-11-27T13:24:02,824 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/2378d0a1b69e428380420a022a4415f5 2024-11-27T13:24:02,833 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e0b2a026a44d40f1add29cb99d906787 is 50, key is test_row_1/B:col10/1732713841511/Put/seqid=0 2024-11-27T13:24:02,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741962_1138 (size=9857) 2024-11-27T13:24:03,238 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e0b2a026a44d40f1add29cb99d906787 2024-11-27T13:24:03,246 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/64dec593282e4f75b67504ad1bf6f79f is 50, key is test_row_1/C:col10/1732713841511/Put/seqid=0 2024-11-27T13:24:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741963_1139 (size=9857) 2024-11-27T13:24:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-27T13:24:03,650 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=598 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/64dec593282e4f75b67504ad1bf6f79f 2024-11-27T13:24:03,655 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/A/2378d0a1b69e428380420a022a4415f5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/2378d0a1b69e428380420a022a4415f5 2024-11-27T13:24:03,659 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/2378d0a1b69e428380420a022a4415f5, 
entries=100, sequenceid=598, filesize=9.6 K 2024-11-27T13:24:03,660 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/B/e0b2a026a44d40f1add29cb99d906787 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e0b2a026a44d40f1add29cb99d906787 2024-11-27T13:24:03,664 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e0b2a026a44d40f1add29cb99d906787, entries=100, sequenceid=598, filesize=9.6 K 2024-11-27T13:24:03,664 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/.tmp/C/64dec593282e4f75b67504ad1bf6f79f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/64dec593282e4f75b67504ad1bf6f79f 2024-11-27T13:24:03,669 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/64dec593282e4f75b67504ad1bf6f79f, entries=100, sequenceid=598, filesize=9.6 K 2024-11-27T13:24:03,669 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 2b5b15f41df6d1ae2583263f41ba6257 in 1253ms, sequenceid=598, compaction requested=true 2024-11-27T13:24:03,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/317b1dbae93a424a915fb0518f0ba360, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/a4224da74cab4325b28e7e0e473cb91c, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0931184f20134207aea79d2662c55d93, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/6fe3024f53f24576a692e0a99dcf1d68, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/46f7f8981d174e4c8e8e2f79319d4a9f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/50588092548741718b0144a36a7c3f51, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/4d767c0d57464647a463dc3b63287225, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9bc831087ad942a78a2a5481e1d0842a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8093ed0786814a2e8ee00b437bb03969, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/01f5e9061008417f807130c6d7da9549, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f] to archive 2024-11-27T13:24:03,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T13:24:03,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7ca45dda8c3e4c0db16a56b501f817ca 2024-11-27T13:24:03,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b19b533cd48a4ddb9da75b87d0dee48b 2024-11-27T13:24:03,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/279f1e117fd94748985a858290c6ee6f 2024-11-27T13:24:03,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/317b1dbae93a424a915fb0518f0ba360 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/317b1dbae93a424a915fb0518f0ba360 2024-11-27T13:24:03,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bef83f31cb094b7a92787c0f1ba0f78e 2024-11-27T13:24:03,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/a4224da74cab4325b28e7e0e473cb91c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/a4224da74cab4325b28e7e0e473cb91c 2024-11-27T13:24:03,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/15178bbebec34fa198dd3f79177dd2e5 2024-11-27T13:24:03,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cc3af2d0ca67416cbdbb2bdb7805d17b 2024-11-27T13:24:03,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e74a2d900d5e42878300cae752dffbe6 2024-11-27T13:24:03,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/06e6f2716c25495d93670217fa7db097 2024-11-27T13:24:03,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0931184f20134207aea79d2662c55d93 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0931184f20134207aea79d2662c55d93 2024-11-27T13:24:03,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/472e23c51a4a4db39e0b4c425eecbba2 2024-11-27T13:24:03,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/7f3f5dd0e1ef4116ad9ecc8a5ae84c1b 2024-11-27T13:24:03,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/6fe3024f53f24576a692e0a99dcf1d68 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/6fe3024f53f24576a692e0a99dcf1d68 2024-11-27T13:24:03,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/e8c14504f93d4de9ae1c2e6a33a2d0b1 2024-11-27T13:24:03,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/46f7f8981d174e4c8e8e2f79319d4a9f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/46f7f8981d174e4c8e8e2f79319d4a9f 2024-11-27T13:24:03,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/533588c3ceca4431868c9b856dcc216a 2024-11-27T13:24:03,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d9c730ff2a9d4fd68abafc8b4051ae96 2024-11-27T13:24:03,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/50588092548741718b0144a36a7c3f51 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/50588092548741718b0144a36a7c3f51 2024-11-27T13:24:03,703 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0213d036c9d64522a11802937ad70c93 2024-11-27T13:24:03,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/d8df0d7921f44646baa613cd48cf11bd 2024-11-27T13:24:03,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8ba83e4481ff46f39c5fb4cb8f1e1ca6 2024-11-27T13:24:03,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/4d767c0d57464647a463dc3b63287225 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/4d767c0d57464647a463dc3b63287225 2024-11-27T13:24:03,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9c98f196f3e0480c9cce920a7e9b7e7f 2024-11-27T13:24:03,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8872e7124cd4a02bf8715d4d3be3b2e 2024-11-27T13:24:03,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9bc831087ad942a78a2a5481e1d0842a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9bc831087ad942a78a2a5481e1d0842a 2024-11-27T13:24:03,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/9d18baf650834fa8a1544cafcf093699 2024-11-27T13:24:03,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fc39e7359c8e44438f7c999ec6339c7c 2024-11-27T13:24:03,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8093ed0786814a2e8ee00b437bb03969 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/8093ed0786814a2e8ee00b437bb03969 2024-11-27T13:24:03,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/35d58ffdb254496982021dc67086c9bc 2024-11-27T13:24:03,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/cdc4acb85c0444fe89d4c61e0d551a47 2024-11-27T13:24:03,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/0b442d7df5c84bb68b90893bc3c44719 2024-11-27T13:24:03,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/01f5e9061008417f807130c6d7da9549 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/01f5e9061008417f807130c6d7da9549 2024-11-27T13:24:03,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/54e3b619b39a482a8155365da3c11c58 2024-11-27T13:24:03,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/fdff8ad491b4498996fbc570a0b6e39e 2024-11-27T13:24:03,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/79f09da0154942b6b633ab50509838df 2024-11-27T13:24:03,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/41d639d1381549f8bdc404d763d694f9 2024-11-27T13:24:03,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/805acbc8d8004ef69f87448daf13563f 2024-11-27T13:24:03,745 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/231cfbcafc8e4ee2822e59b56a03d4ed, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b998eb9fae874cd6b231028f81bdd6a4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/8b7f358acc2243b49101ba8ac97b424a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/95df8d94d3404a9d85adf329824d8413, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/2124c083881c4f9da0520ce46e99e173, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5b386dc35ea343a5936ec02c043b847b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d671851a39db407cbc21fb26091bae5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e4502befd7304960957465215e5153fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6c051565d40420a889ec441e3e29d0a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1502f0c2198b48a586222569dfe29dec, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014] to archive 2024-11-27T13:24:03,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:24:03,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/0545530537ef4c48bcbea29101ba6e04 2024-11-27T13:24:03,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b9919bf354f74e918f9167d2056408a7 2024-11-27T13:24:03,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/231cfbcafc8e4ee2822e59b56a03d4ed to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/231cfbcafc8e4ee2822e59b56a03d4ed 2024-11-27T13:24:03,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d36cb9932e7245839d8ef3fc8385973d 2024-11-27T13:24:03,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/48b60cbc74884b039bc7d576649be4df 2024-11-27T13:24:03,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b998eb9fae874cd6b231028f81bdd6a4 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/b998eb9fae874cd6b231028f81bdd6a4 2024-11-27T13:24:03,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c2cb1252e3704cc6954b617dda18be2a 2024-11-27T13:24:03,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/94f73969940f42e7bee05882e15c62b5 2024-11-27T13:24:03,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/24ec0d1e605c4900b4b4a279cfa057e6 2024-11-27T13:24:03,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/8b7f358acc2243b49101ba8ac97b424a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/8b7f358acc2243b49101ba8ac97b424a 2024-11-27T13:24:03,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/64d156a6b9364479a17a5cb4cb5012ce 2024-11-27T13:24:03,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6ae6861db204beea807eadaab25c5bb 2024-11-27T13:24:03,763 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/95df8d94d3404a9d85adf329824d8413 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/95df8d94d3404a9d85adf329824d8413 2024-11-27T13:24:03,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e3307c280da548059beed50a59698182 2024-11-27T13:24:03,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/3b8ed677b79d4b978eff5d7587cd2d78 2024-11-27T13:24:03,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/2124c083881c4f9da0520ce46e99e173 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/2124c083881c4f9da0520ce46e99e173 2024-11-27T13:24:03,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6b350bcca7af486cae4e4596498a4e0d 2024-11-27T13:24:03,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a2e34151755b48628e1e6f316b090666 2024-11-27T13:24:03,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5b386dc35ea343a5936ec02c043b847b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5b386dc35ea343a5936ec02c043b847b 2024-11-27T13:24:03,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/c6929faaab28489ab48a72f45fd8209f 2024-11-27T13:24:03,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/07952e890a2b4087b05509c7624a484c 2024-11-27T13:24:03,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d671851a39db407cbc21fb26091bae5a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/d671851a39db407cbc21fb26091bae5a 2024-11-27T13:24:03,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/694299885db7474796605084518b30d5 2024-11-27T13:24:03,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/bae67d98c159409e8868fa5f7a53eb0d 2024-11-27T13:24:03,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e4502befd7304960957465215e5153fb to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e4502befd7304960957465215e5153fb 2024-11-27T13:24:03,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/77fe60eda94346d0a5f4480af3f93bbe 2024-11-27T13:24:03,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/320e98c84dcf46739ace6db9a62b17ff 2024-11-27T13:24:03,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/ab11be31be5f4e3d959a1927c105ec7b 2024-11-27T13:24:03,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6c051565d40420a889ec441e3e29d0a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a6c051565d40420a889ec441e3e29d0a 2024-11-27T13:24:03,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/a18728d88bce4f428cdcec036c85b645 2024-11-27T13:24:03,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/54264c569ad840b288800ca4d5a3a8c0 2024-11-27T13:24:03,788 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1502f0c2198b48a586222569dfe29dec to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1502f0c2198b48a586222569dfe29dec 2024-11-27T13:24:03,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/46d161d925544eeebca5371a3643287b 2024-11-27T13:24:03,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/75f1242bc8d248ad9f7aacf3e43318da 2024-11-27T13:24:03,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/dcbdc597fc9c476b9e5bb3ecd9ded7f0 2024-11-27T13:24:03,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/5d7b555ed0d445efa3464851f373b52d 2024-11-27T13:24:03,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/1de22228ce5e474fa701eb374cdf6f5a 2024-11-27T13:24:03,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/cc1b507524d14c48bfc48ec91ad4f014 2024-11-27T13:24:03,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9f769a3eef834840abcb38a51ddc11bd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b23110d16a004fb192eb060a794967ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fd92c336fe4d413fba1d6655e0d1570c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a3cde8669d4a40b0bde5279fdf13a762, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/122b33ddf7b54d9eba39992487aaabc2, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/0429ffb9b2124be289eb2810761f6bf8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8f7657618b704adaaee8c0bc56af2ede, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/67ab94fc262046a0aed2a3a349ac541c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9b723e219c40483890a9c1abf156ec95, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac] to archive 2024-11-27T13:24:03,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:24:03,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/3c3ebce4a2dc4d0394a50ca7a20283b6 2024-11-27T13:24:03,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/21455a60c885466caff1479e6bba3dee 2024-11-27T13:24:03,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9f769a3eef834840abcb38a51ddc11bd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9f769a3eef834840abcb38a51ddc11bd 2024-11-27T13:24:03,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fff1b25b9bfd4749b5325e3e37984cc3 2024-11-27T13:24:03,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/486db83cb0ee40f2a96bf137528ba72c 2024-11-27T13:24:03,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b23110d16a004fb192eb060a794967ff to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b23110d16a004fb192eb060a794967ff 2024-11-27T13:24:03,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8ad5d83226c7484f8baa3e9e229fb2dc 2024-11-27T13:24:03,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/94b80acda74a4a57ad6e07ee232387f6 2024-11-27T13:24:03,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/bafbf99522e2480eac18756d0d6d8cf3 2024-11-27T13:24:03,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fd92c336fe4d413fba1d6655e0d1570c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/fd92c336fe4d413fba1d6655e0d1570c 2024-11-27T13:24:03,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff9a99fa0af74cf4a1694f3fa37184c6 2024-11-27T13:24:03,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/03cb92e7a94d4eaaa508507b7a323b8f 2024-11-27T13:24:03,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a3cde8669d4a40b0bde5279fdf13a762 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a3cde8669d4a40b0bde5279fdf13a762 2024-11-27T13:24:03,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/48045c300a72423cae35611940a3a939 2024-11-27T13:24:03,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/d3efab72dedf4c04b0e68615b2e130b5 2024-11-27T13:24:03,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/122b33ddf7b54d9eba39992487aaabc2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/122b33ddf7b54d9eba39992487aaabc2 2024-11-27T13:24:03,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/4aabe551828c4d9eb2658521538c2b82 2024-11-27T13:24:03,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9db48292456645919b6439df6a78e9cf 2024-11-27T13:24:03,824 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/0429ffb9b2124be289eb2810761f6bf8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/0429ffb9b2124be289eb2810761f6bf8 2024-11-27T13:24:03,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/936b93fcd50c40eaa8adf37c8781dfa0 2024-11-27T13:24:03,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a02beae7aafc4a48897419e230ec95ef 2024-11-27T13:24:03,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8f7657618b704adaaee8c0bc56af2ede to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8f7657618b704adaaee8c0bc56af2ede 2024-11-27T13:24:03,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/ff186b8acd18427f84e2e230e1fc8774 2024-11-27T13:24:03,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/68aa49bd6cbf42f7989ba1b2c5276003 2024-11-27T13:24:03,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/67ab94fc262046a0aed2a3a349ac541c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/67ab94fc262046a0aed2a3a349ac541c 2024-11-27T13:24:03,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/84ba983dcbf442a6b3c91070895bc754 2024-11-27T13:24:03,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/6545ad442eb54ce0bef83f4c42ab768c 2024-11-27T13:24:03,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/df8db77ff14549e6af2d5fbb44ff4362 2024-11-27T13:24:03,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/b3e9bc9b52ca464e8abdf20bb4618a0f 2024-11-27T13:24:03,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/446e8414100749949940a009abda11d7 2024-11-27T13:24:03,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9b723e219c40483890a9c1abf156ec95 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/9b723e219c40483890a9c1abf156ec95 2024-11-27T13:24:03,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/07a5210f21584275aac37e90d5662d46 2024-11-27T13:24:03,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/8fad37c81fae4b81a646cc01986d4751 2024-11-27T13:24:03,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/2e5b55b104124fc78ac9a6dfcae4371b 2024-11-27T13:24:03,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/5c18c50430bb496c8cb0ec81d53fbe49 2024-11-27T13:24:03,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/01b830598ac74ee99e2517e8b5dab7a8 2024-11-27T13:24:03,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/49157907ca264ddfbf0dd1f4501292ac 2024-11-27T13:24:03,850 DEBUG 
[RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/recovered.edits/601.seqid, newMaxSeqId=601, maxSeqId=1 2024-11-27T13:24:03,853 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257. 2024-11-27T13:24:03,853 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 2b5b15f41df6d1ae2583263f41ba6257: 2024-11-27T13:24:03,855 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:24:03,856 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=2b5b15f41df6d1ae2583263f41ba6257, regionState=CLOSED 2024-11-27T13:24:03,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-27T13:24:03,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 2b5b15f41df6d1ae2583263f41ba6257, server=a0541979a851,32819,1732713812705 in 2.3110 sec 2024-11-27T13:24:03,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-27T13:24:03,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2b5b15f41df6d1ae2583263f41ba6257, UNASSIGN in 2.3150 sec 2024-11-27T13:24:03,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-27T13:24:03,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.3220 sec 2024-11-27T13:24:03,864 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713843863"}]},"ts":"1732713843863"} 2024-11-27T13:24:03,865 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:24:03,867 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:24:03,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3420 sec 2024-11-27T13:24:04,786 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T13:24:04,788 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T13:24:05,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-27T13:24:05,638 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, 
procId: 36 completed 2024-11-27T13:24:05,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:24:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,647 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-27T13:24:05,651 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:24:05,654 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/recovered.edits] 2024-11-27T13:24:05,657 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/2378d0a1b69e428380420a022a4415f5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/2378d0a1b69e428380420a022a4415f5 2024-11-27T13:24:05,658 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b50a4586a5ea4637b5dc3e04e59099de to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/b50a4586a5ea4637b5dc3e04e59099de 2024-11-27T13:24:05,660 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bcf29f38d2c242328892e3ee51ec0b0e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/bcf29f38d2c242328892e3ee51ec0b0e 2024-11-27T13:24:05,661 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8e0c620e5974dd0bfbca2e2722908aa to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/A/c8e0c620e5974dd0bfbca2e2722908aa 2024-11-27T13:24:05,663 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/453ca59ec7ce41c1ad0dc109b6c5ec09 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/453ca59ec7ce41c1ad0dc109b6c5ec09 2024-11-27T13:24:05,665 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6ca0e05fd9cb4cd999b1d31880749dd8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/6ca0e05fd9cb4cd999b1d31880749dd8 2024-11-27T13:24:05,666 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e0b2a026a44d40f1add29cb99d906787 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/e0b2a026a44d40f1add29cb99d906787 2024-11-27T13:24:05,667 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/fe9c17ea3bcd405b9e515e7fdb7855cd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/B/fe9c17ea3bcd405b9e515e7fdb7855cd 2024-11-27T13:24:05,669 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/64dec593282e4f75b67504ad1bf6f79f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/64dec593282e4f75b67504ad1bf6f79f 2024-11-27T13:24:05,670 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/85511a4bc4c349c1922735d8f2377460 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/85511a4bc4c349c1922735d8f2377460 2024-11-27T13:24:05,671 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a5e5fb5974c74c729dec4140422ce726 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/a5e5fb5974c74c729dec4140422ce726 2024-11-27T13:24:05,673 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/dbeffea9814b4bf2adba7a4fefbb0e8f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/C/dbeffea9814b4bf2adba7a4fefbb0e8f 2024-11-27T13:24:05,676 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/recovered.edits/601.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257/recovered.edits/601.seqid 2024-11-27T13:24:05,676 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/2b5b15f41df6d1ae2583263f41ba6257 2024-11-27T13:24:05,676 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:24:05,681 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-27T13:24:05,688 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:24:05,721 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T13:24:05,722 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,722 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T13:24:05,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713845722"}]},"ts":"9223372036854775807"} 2024-11-27T13:24:05,725 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:24:05,725 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2b5b15f41df6d1ae2583263f41ba6257, NAME => 'TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:24:05,725 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-27T13:24:05,725 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713845725"}]},"ts":"9223372036854775807"} 2024-11-27T13:24:05,727 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:24:05,729 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 87 msec 2024-11-27T13:24:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-27T13:24:05,749 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-27T13:24:05,761 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_OPEN_REGION-regionserver/a0541979a851:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_374866773_22 at /127.0.0.1:44568 [Waiting for operation #389] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1987755983_22 at /127.0.0.1:44404 [Waiting for operation #399] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/a0541979a851:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;a0541979a851:32819-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=456 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=451 (was 385) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4342 (was 4858) 2024-11-27T13:24:05,770 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=451, ProcessCount=11, AvailableMemoryMB=4341 2024-11-27T13:24:05,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-27T13:24:05,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:24:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:05,774 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:24:05,774 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:05,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-11-27T13:24:05,775 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:24:05,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:05,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741964_1140 (size=963) 2024-11-27T13:24:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:06,183 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:24:06,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741965_1141 (size=53) 2024-11-27T13:24:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3e8c8da06d0d53cf44a4ff2a4693ab7d, disabling compactions & flushes 2024-11-27T13:24:06,590 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. after waiting 0 ms 2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:06,590 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
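The create request at 13:24:05,772 is the string form of a three-family table ('A', 'B', 'C', each with VERSIONS => '1'; the remaining per-family attributes shown are the defaults) whose table-level attribute hbase.hregion.compacting.memstore.type = ADAPTIVE selects the adaptive in-memory compaction policy for the CompactingMemStore. A rough client-side equivalent, assuming the HBase 2.x Admin API (class name and connection boilerplate are illustrative, not from the test source):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute seen in the request: adaptive in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] { "A", "B", "C" }) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1'
                .build());
          }
          admin.createTable(tdb.build());
        }
      }
    }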
2024-11-27T13:24:06,590 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:06,592 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:24:06,592 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713846592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713846592"}]},"ts":"1732713846592"} 2024-11-27T13:24:06,593 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:24:06,594 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:24:06,594 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713846594"}]},"ts":"1732713846594"} 2024-11-27T13:24:06,595 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:24:06,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, ASSIGN}] 2024-11-27T13:24:06,600 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, ASSIGN 2024-11-27T13:24:06,600 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:24:06,751 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:06,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:06,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:06,904 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:06,908 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
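The repeated "Checking to see if procedure is done pid=41" lines are the client polling the master while CreateTableProcedure 41 walks through its states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) and spawns the assignment subprocedures pid=42/43. From application code this polling is hidden behind the Admin call; a hedged sketch of the asynchronous variant in recent HBase 2.x, which exposes the same master-side procedure as a Future (the table name here is illustrative only):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableAndWait {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("SomeOtherTable")) // illustrative name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
              .build();
          // createTableAsync submits the master-side CreateTableProcedure and returns a
          // Future; get() keeps asking the master whether the procedure has finished,
          // which is what the MasterRpcServices "is procedure done" lines correspond to.
          Future<Void> pending = admin.createTableAsync(td);
          pending.get(5, TimeUnit.MINUTES);
        }
      }
    }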
2024-11-27T13:24:06,908 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:24:06,909 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,909 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:24:06,909 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,909 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,910 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,912 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:06,912 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName A 2024-11-27T13:24:06,912 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:06,913 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:06,913 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,914 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:06,915 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName B 2024-11-27T13:24:06,915 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:06,915 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:06,915 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,916 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:06,917 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName C 2024-11-27T13:24:06,917 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:06,917 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:06,917 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:06,918 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,918 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,920 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:24:06,921 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:06,923 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:24:06,923 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 3e8c8da06d0d53cf44a4ff2a4693ab7d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71623735, jitterRate=0.06727682054042816}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:24:06,924 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:06,924 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., pid=43, masterSystemTime=1732713846904 2024-11-27T13:24:06,926 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:06,926 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
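Two numbers in the "Opened 3e8c8da06d0d53cf44a4ff2a4693ab7d" line are worth decoding. desiredMaxFileSize=71623735 together with jitterRate=0.06727682 is consistent with a configured hbase.hregion.max.filesize of 67108864 (64 MB) to which ConstantSizeRegionSplitPolicy adds a random per-region jitter: 67108864 × (1 + 0.06727682) ≈ 71623735 (the reopen later in this log shows the same apparent 64 MB base with a different jitter, 67329725 at jitterRate 0.00329). flushSizeLowerBound=16777216 is the 16 MB per-family lower bound reported by FlushLargeStoresPolicy just above. A tiny sketch reproducing the jitter arithmetic; the 64 MB base is inferred from these figures, not stated in the log:

    public class SplitSizeJitterMath {
      public static void main(String[] args) {
        long assumedMaxFileSize = 64L * 1024 * 1024;  // inferred hbase.hregion.max.filesize
        double jitterRate = 0.06727682054042816;      // printed by ConstantSizeRegionSplitPolicy above
        long desired = Math.round(assumedMaxFileSize * (1.0 + jitterRate));
        System.out.println(desired);                  // ~71623735, the desiredMaxFileSize in the log
      }
    }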
2024-11-27T13:24:06,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:06,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-27T13:24:06,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 in 176 msec 2024-11-27T13:24:06,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-27T13:24:06,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, ASSIGN in 331 msec 2024-11-27T13:24:06,932 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:24:06,932 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713846932"}]},"ts":"1732713846932"} 2024-11-27T13:24:06,933 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:24:06,936 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:24:06,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1640 sec 2024-11-27T13:24:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-27T13:24:07,880 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-27T13:24:07,883 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04506927 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a9b9802 2024-11-27T13:24:07,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118b007e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:07,888 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:07,890 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43894, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:07,892 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:24:07,894 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59100, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:24:07,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T13:24:07,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:24:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:07,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741966_1142 (size=999) 2024-11-27T13:24:08,319 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-27T13:24:08,319 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-27T13:24:08,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:24:08,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, REOPEN/MOVE}] 2024-11-27T13:24:08,333 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, REOPEN/MOVE 2024-11-27T13:24:08,334 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,335 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:24:08,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:08,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:08,487 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,487 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:24:08,487 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 3e8c8da06d0d53cf44a4ff2a4693ab7d, disabling compactions & flushes 2024-11-27T13:24:08,487 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:08,487 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:08,487 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. after waiting 0 ms 2024-11-27T13:24:08,487 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:08,491 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T13:24:08,492 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:08,492 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:08,492 WARN [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 3e8c8da06d0d53cf44a4ff2a4693ab7d to self. 2024-11-27T13:24:08,493 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,494 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=CLOSED 2024-11-27T13:24:08,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-27T13:24:08,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 in 160 msec 2024-11-27T13:24:08,496 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, REOPEN/MOVE; state=CLOSED, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=true 2024-11-27T13:24:08,647 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:08,800 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:08,803 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
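The modify request at 13:24:07,899 changes only column family 'A': it becomes a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', so values larger than 4 bytes are written to MOB files rather than ordinary store files, which is what testMobMixedAtomicity exercises. Because the descriptor changed, the master runs a ReopenTableRegionsProcedure (pid=45): the region is closed under pid=47 and reopened under pid=48 so the region server picks up the new schema. The test issues a full table modification; a hedged client-side equivalent that touches just the one family (class name and boilerplate are illustrative, not from the test source):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          ColumnFamilyDescriptor current =
              admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobEnabled = ColumnFamilyDescriptorBuilder
              .newBuilder(current)
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(4L)  // MOB_THRESHOLD => '4' (bytes)
              .build();
          // Changing the family descriptor makes the master reopen the table's regions,
          // matching the close/open sequence logged around 13:24:08.
          admin.modifyColumnFamily(tn, mobEnabled);
        }
      }
    }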
2024-11-27T13:24:08,803 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:24:08,804 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,804 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:24:08,804 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,804 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,806 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,807 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:08,812 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName A 2024-11-27T13:24:08,814 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:08,814 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:08,815 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,816 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:08,816 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName B 2024-11-27T13:24:08,816 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:08,816 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:08,816 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,817 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:08,817 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e8c8da06d0d53cf44a4ff2a4693ab7d columnFamilyName C 2024-11-27T13:24:08,817 DEBUG [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:08,817 INFO [StoreOpener-3e8c8da06d0d53cf44a4ff2a4693ab7d-1 {}] regionserver.HStore(327): Store=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:08,818 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:08,818 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,819 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,821 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:24:08,822 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,823 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 3e8c8da06d0d53cf44a4ff2a4693ab7d; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67329725, jitterRate=0.003291085362434387}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:24:08,825 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:08,825 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., pid=48, masterSystemTime=1732713848800 2024-11-27T13:24:08,827 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:08,827 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:08,827 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=OPEN, openSeqNum=5, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-11-27T13:24:08,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 in 181 msec 2024-11-27T13:24:08,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-27T13:24:08,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, REOPEN/MOVE in 498 msec 2024-11-27T13:24:08,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-27T13:24:08,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 511 msec 2024-11-27T13:24:08,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 934 msec 2024-11-27T13:24:08,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-27T13:24:08,845 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7362d978 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cae6c5c 2024-11-27T13:24:08,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7d6279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,852 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-11-27T13:24:08,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-11-27T13:24:08,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,870 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 
127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-11-27T13:24:08,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,879 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-27T13:24:08,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,885 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-11-27T13:24:08,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,890 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-11-27T13:24:08,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,894 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-11-27T13:24:08,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,898 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-11-27T13:24:08,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:08,907 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:08,908 DEBUG [hconnection-0x2ac3e101-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-27T13:24:08,910 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:08,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:08,911 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:08,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:08,913 DEBUG [hconnection-0x6b4524be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,914 DEBUG [hconnection-0x160afe77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,914 DEBUG [hconnection-0x2696aa23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,915 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,915 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,915 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,916 DEBUG [hconnection-0x3a83afe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,916 DEBUG [hconnection-0x3dfb4f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,917 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,917 DEBUG [hconnection-0xa5c643a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,918 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-27T13:24:08,924 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,927 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,929 DEBUG [hconnection-0x27a3616c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,931 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,932 DEBUG [hconnection-0x12e2ac45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:08,933 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43984, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:08,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:08,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:24:08,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:08,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:08,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:08,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:08,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:08,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:08,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:08,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713908967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:08,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713908969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713908973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713908977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713908979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:09,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a347d4b9e56c4bb584bd36ff73d3be7e_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713848927/Put/seqid=0 2024-11-27T13:24:09,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741967_1143 (size=12154) 2024-11-27T13:24:09,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
as already flushing 2024-11-27T13:24:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713909082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713909083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713909084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713909084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713909082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:09,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713909287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713909288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713909289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713909290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713909290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,381 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:24:09,440 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:09,447 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a347d4b9e56c4bb584bd36ff73d3be7e_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a347d4b9e56c4bb584bd36ff73d3be7e_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:09,449 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/69f8c27c2d12442489a811281ba662e9, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:09,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/69f8c27c2d12442489a811281ba662e9 is 175, key is test_row_0/A:col10/1732713848927/Put/seqid=0 2024-11-27T13:24:09,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741968_1144 (size=30955) 2024-11-27T13:24:09,494 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/69f8c27c2d12442489a811281ba662e9 2024-11-27T13:24:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:09,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
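The repeated RegionTooBusyException entries above show client mutations being rejected while region 3e8c8da06d0d53cf44a4ff2a4693ab7d is over its memstore blocking limit and the in-progress flush has not yet completed (pid=50 keeps reporting "NOT flushing ... as already flushing", so the master retries the FlushRegionProcedure). As a minimal sketch of where the "Over memstore limit=512.0 K" figure conventionally comes from, the Java snippet below derives the blocking limit from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the concrete values shown (128 KB flush size, multiplier 4) are assumptions chosen for illustration, not read from this test's configuration.

    // Illustrative sketch, not part of the test source: derives the memstore blocking limit
    // that produces RegionTooBusyException ("Over memstore limit=...") in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style settings; a deliberately small flush size makes flushes (and
        // write blocking) happen quickly. The shipped defaults are 128 MB and 4 respectively.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB (assumption)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L << 20);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Writes to a region are blocked once its memstore exceeds flushSize * multiplier.
        System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");
      }
    }

With the assumed values the limit works out to 524288 bytes, i.e. the 512.0 K reported in these records; puts resume once MemStoreFlusher.0 brings the region's memstore back under that threshold.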
2024-11-27T13:24:09,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2aa4f7d3abda48288d626a412e328e22 is 50, key is test_row_0/B:col10/1732713848927/Put/seqid=0 2024-11-27T13:24:09,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741969_1145 (size=12001) 2024-11-27T13:24:09,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713909598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713909603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713909604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713909605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:09,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713909606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:09,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:09,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,838 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:09,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2aa4f7d3abda48288d626a412e328e22 2024-11-27T13:24:09,991 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:09,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:09,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:09,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:09,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:09,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:10,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/127c146cd3ce4b1fb7a2d06b2f984531 is 50, key is test_row_0/C:col10/1732713848927/Put/seqid=0 2024-11-27T13:24:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741970_1146 (size=12001) 2024-11-27T13:24:10,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713910101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:10,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:10,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713910108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:10,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713910109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:10,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:10,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713910110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:10,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713910112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:10,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:10,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:10,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:10,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:10,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,299 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:10,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:10,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:10,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:10,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/127c146cd3ce4b1fb7a2d06b2f984531 2024-11-27T13:24:10,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/69f8c27c2d12442489a811281ba662e9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9 2024-11-27T13:24:10,454 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:10,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:10,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:10,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:10,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:10,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:10,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9, entries=150, sequenceid=17, filesize=30.2 K 2024-11-27T13:24:10,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2aa4f7d3abda48288d626a412e328e22 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22 2024-11-27T13:24:10,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22, entries=150, sequenceid=17, filesize=11.7 K 2024-11-27T13:24:10,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/127c146cd3ce4b1fb7a2d06b2f984531 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531 2024-11-27T13:24:10,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531, entries=150, sequenceid=17, filesize=11.7 K 2024-11-27T13:24:10,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1538ms, sequenceid=17, compaction requested=false 2024-11-27T13:24:10,474 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-27T13:24:10,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:10,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:10,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-27T13:24:10,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:10,608 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:10,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:10,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411277182e1b98bd24a8a9b90c8196ef8f2a3_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713848966/Put/seqid=0 2024-11-27T13:24:10,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741971_1147 (size=12154) 2024-11-27T13:24:10,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:10,658 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411277182e1b98bd24a8a9b90c8196ef8f2a3_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411277182e1b98bd24a8a9b90c8196ef8f2a3_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:10,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/2f2e1dd0dc4d4940ac2470603b92a99c, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:10,661 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/2f2e1dd0dc4d4940ac2470603b92a99c is 175, key is test_row_0/A:col10/1732713848966/Put/seqid=0 2024-11-27T13:24:10,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741972_1148 (size=30955) 2024-11-27T13:24:11,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:11,091 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/2f2e1dd0dc4d4940ac2470603b92a99c 2024-11-27T13:24:11,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/88bfcdfed8304f18a4f3441bb0564a49 is 50, key is test_row_0/B:col10/1732713848966/Put/seqid=0 2024-11-27T13:24:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:11,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:11,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741973_1149 (size=12001) 2024-11-27T13:24:11,134 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/88bfcdfed8304f18a4f3441bb0564a49 2024-11-27T13:24:11,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713911128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713911131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713911133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713911135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713911136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/9b18f70e385c4ca588bbaacd66f2acdb is 50, key is test_row_0/C:col10/1732713848966/Put/seqid=0 2024-11-27T13:24:11,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741974_1150 (size=12001) 2024-11-27T13:24:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713911237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713911238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713911239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713911240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713911240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713911442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713911442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713911444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713911444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713911445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,583 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/9b18f70e385c4ca588bbaacd66f2acdb 2024-11-27T13:24:11,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/2f2e1dd0dc4d4940ac2470603b92a99c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c 2024-11-27T13:24:11,596 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c, entries=150, sequenceid=40, filesize=30.2 K 2024-11-27T13:24:11,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/88bfcdfed8304f18a4f3441bb0564a49 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49 2024-11-27T13:24:11,608 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49, entries=150, sequenceid=40, filesize=11.7 K 2024-11-27T13:24:11,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/9b18f70e385c4ca588bbaacd66f2acdb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb 2024-11-27T13:24:11,622 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb, entries=150, sequenceid=40, filesize=11.7 K 2024-11-27T13:24:11,624 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1016ms, sequenceid=40, compaction requested=false 2024-11-27T13:24:11,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:11,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
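Note on the RegionTooBusyException entries surrounding this flush: they all cite the same blocking threshold, "Over memstore limit=512.0 K". In HBase that threshold is the per-region memstore flush size times the blocking multiplier (hbase.hregion.memstore.flush.size, default 128 MB, and hbase.hregion.memstore.block.multiplier, default 4), so the test is evidently running with a much smaller limit than the defaults; writes against the region are rejected until the in-flight flush drains the memstore back under the threshold. The sketch below shows how such a threshold would be configured, with illustrative values only (128 K * 4 = 512 K matches the message in the log, but the test's real settings are not visible here).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = hbase.hregion.memstore.flush.size
    //                      * hbase.hregion.memstore.block.multiplier
    // (defaults 128 MB and 4). 128 K * 4 = 512 K would reproduce the
    // "Over memstore limit=512.0 K" rejections above; these values are
    // assumptions for illustration, not taken from the test source.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}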
2024-11-27T13:24:11,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-27T13:24:11,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-27T13:24:11,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-27T13:24:11,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7150 sec 2024-11-27T13:24:11,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.7220 sec 2024-11-27T13:24:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:11,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:24:11,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:11,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c86e85ab8232424f81b475ab085da415_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:11,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713911767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713911768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713911768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713911774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713911774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741975_1151 (size=12154) 2024-11-27T13:24:11,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713911875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713911875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713911876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713911878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:11,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:11,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713911878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713912080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713912080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713912080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713912081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713912081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,187 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:12,192 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c86e85ab8232424f81b475ab085da415_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c86e85ab8232424f81b475ab085da415_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:12,193 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7a8a35a84ecf4fb299fbbd9fb0232f02, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:12,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7a8a35a84ecf4fb299fbbd9fb0232f02 is 175, key is test_row_0/A:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741976_1152 (size=30955) 2024-11-27T13:24:12,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713912384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713912384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713912384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713912385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713912387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,603 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7a8a35a84ecf4fb299fbbd9fb0232f02 2024-11-27T13:24:12,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2e6c7dd8eb3c4875b0a053d9d824d2ea is 50, key is test_row_0/B:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741977_1153 (size=12001) 2024-11-27T13:24:12,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2e6c7dd8eb3c4875b0a053d9d824d2ea 2024-11-27T13:24:12,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/1a3ebf3dc1924f15ab257b2a5c39c948 is 50, key is test_row_0/C:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741978_1154 (size=12001) 2024-11-27T13:24:12,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/1a3ebf3dc1924f15ab257b2a5c39c948 2024-11-27T13:24:12,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7a8a35a84ecf4fb299fbbd9fb0232f02 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02 2024-11-27T13:24:12,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02, entries=150, sequenceid=55, filesize=30.2 K 2024-11-27T13:24:12,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2e6c7dd8eb3c4875b0a053d9d824d2ea as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea 2024-11-27T13:24:12,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:24:12,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/1a3ebf3dc1924f15ab257b2a5c39c948 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948 2024-11-27T13:24:12,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:24:12,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 940ms, sequenceid=55, compaction requested=true 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:12,688 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:12,688 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:12,691 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:12,691 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:12,692 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:12,692 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=35.2 K 2024-11-27T13:24:12,692 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:12,692 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:12,692 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:12,692 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2aa4f7d3abda48288d626a412e328e22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713848927 2024-11-27T13:24:12,692 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=90.7 K 2024-11-27T13:24:12,692 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:12,692 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02] 2024-11-27T13:24:12,694 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 88bfcdfed8304f18a4f3441bb0564a49, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732713848966 2024-11-27T13:24:12,694 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69f8c27c2d12442489a811281ba662e9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713848927 2024-11-27T13:24:12,694 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e6c7dd8eb3c4875b0a053d9d824d2ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:12,695 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f2e1dd0dc4d4940ac2470603b92a99c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732713848966 2024-11-27T13:24:12,695 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a8a35a84ecf4fb299fbbd9fb0232f02, keycount=150, bloomtype=ROW, size=30.2 K, 
encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:12,732 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#134 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:12,733 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/abed73b3a75e4b7987866089fab30a56 is 50, key is test_row_0/B:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,744 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:12,756 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127f0d528e1413a4be88d333b8b405395d3_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741979_1155 (size=12104) 2024-11-27T13:24:12,763 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127f0d528e1413a4be88d333b8b405395d3_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:12,763 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f0d528e1413a4be88d333b8b405395d3_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:12,767 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/abed73b3a75e4b7987866089fab30a56 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/abed73b3a75e4b7987866089fab30a56 2024-11-27T13:24:12,775 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into abed73b3a75e4b7987866089fab30a56(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:12,775 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:12,775 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713852688; duration=0sec 2024-11-27T13:24:12,775 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:12,776 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:12,776 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:12,778 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:12,778 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:12,778 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:12,778 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=35.2 K 2024-11-27T13:24:12,779 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 127c146cd3ce4b1fb7a2d06b2f984531, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713848927 2024-11-27T13:24:12,779 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b18f70e385c4ca588bbaacd66f2acdb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732713848966 2024-11-27T13:24:12,781 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a3ebf3dc1924f15ab257b2a5c39c948, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:12,794 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#136 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:12,795 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/3eb000482946438c95778fb1d27d2ce5 is 50, key is test_row_0/C:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741980_1156 (size=4469) 2024-11-27T13:24:12,803 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#135 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:12,807 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/349d64cfa0d64f76b010a3e4df30982f is 175, key is test_row_0/A:col10/1732713851745/Put/seqid=0 2024-11-27T13:24:12,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741981_1157 (size=12104) 2024-11-27T13:24:12,830 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/3eb000482946438c95778fb1d27d2ce5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3eb000482946438c95778fb1d27d2ce5 2024-11-27T13:24:12,839 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 3eb000482946438c95778fb1d27d2ce5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:12,839 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:12,839 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=13, startTime=1732713852688; duration=0sec 2024-11-27T13:24:12,839 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:12,839 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:12,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741982_1158 (size=31058) 2024-11-27T13:24:12,858 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/349d64cfa0d64f76b010a3e4df30982f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f 2024-11-27T13:24:12,869 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 349d64cfa0d64f76b010a3e4df30982f(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:12,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:12,869 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713852688; duration=0sec 2024-11-27T13:24:12,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:12,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:12,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:24:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:12,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:12,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:12,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:12,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:12,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:12,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713912905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713912905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270796924298e940e7bae95a7a01fc8038_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713852892/Put/seqid=0 2024-11-27T13:24:12,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713912908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713912908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713912910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:12,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741983_1159 (size=12154) 2024-11-27T13:24:13,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713913012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713913012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713913013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713913014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713913014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-27T13:24:13,018 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-27T13:24:13,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-27T13:24:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:13,023 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:13,023 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:13,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:13,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:13,176 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713913215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713913216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713913216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713913216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713913217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:13,329 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,331 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:13,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:13,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:13,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,337 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270796924298e940e7bae95a7a01fc8038_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270796924298e940e7bae95a7a01fc8038_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:13,339 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/b8e05d7040974ad0854877133bd34a36, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:13,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/b8e05d7040974ad0854877133bd34a36 is 175, key is test_row_0/A:col10/1732713852892/Put/seqid=0 2024-11-27T13:24:13,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741984_1160 (size=30955) 2024-11-27T13:24:13,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 
{event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:13,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:13,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713913519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713913519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713913519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:13,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713913521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713913521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:13,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,747 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/b8e05d7040974ad0854877133bd34a36 2024-11-27T13:24:13,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ef4185db13a84447b7a4bbabc04e0bee is 50, key is test_row_0/B:col10/1732713852892/Put/seqid=0 2024-11-27T13:24:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741985_1161 (size=12001) 2024-11-27T13:24:13,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ef4185db13a84447b7a4bbabc04e0bee 2024-11-27T13:24:13,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6d7378fa42b445c183d42774efe083c3 is 50, key is test_row_0/C:col10/1732713852892/Put/seqid=0 2024-11-27T13:24:13,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741986_1162 (size=12001) 2024-11-27T13:24:13,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=81 (bloomFilter=true), 
to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6d7378fa42b445c183d42774efe083c3 2024-11-27T13:24:13,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/b8e05d7040974ad0854877133bd34a36 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36 2024-11-27T13:24:13,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:13,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:13,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36, entries=150, sequenceid=81, filesize=30.2 K 2024-11-27T13:24:13,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ef4185db13a84447b7a4bbabc04e0bee as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee 2024-11-27T13:24:13,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee, entries=150, sequenceid=81, filesize=11.7 K 2024-11-27T13:24:13,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6d7378fa42b445c183d42774efe083c3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3 2024-11-27T13:24:13,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3, entries=150, sequenceid=81, filesize=11.7 K 2024-11-27T13:24:13,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 919ms, sequenceid=81, compaction requested=false 2024-11-27T13:24:13,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:13,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:13,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-27T13:24:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:13,947 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:13,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:13,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127692fc6e379b74f09bef38a8ba9c67b7f_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713852908/Put/seqid=0 2024-11-27T13:24:13,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741987_1163 (size=12154) 2024-11-27T13:24:13,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:13,967 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127692fc6e379b74f09bef38a8ba9c67b7f_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127692fc6e379b74f09bef38a8ba9c67b7f_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1d7245475db841f0871f2b77681f09be, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:13,969 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1d7245475db841f0871f2b77681f09be is 175, key is test_row_0/A:col10/1732713852908/Put/seqid=0 2024-11-27T13:24:13,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741988_1164 (size=30955) 2024-11-27T13:24:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:14,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:14,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:14,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,306 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:24:14,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,390 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1d7245475db841f0871f2b77681f09be 2024-11-27T13:24:14,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d33e028000bf4af298bf43f6a5dc4864 is 50, key is test_row_0/B:col10/1732713852908/Put/seqid=0 2024-11-27T13:24:14,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741989_1165 (size=12001) 2024-11-27T13:24:14,418 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d33e028000bf4af298bf43f6a5dc4864 2024-11-27T13:24:14,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/8fc7d09392104bb5992a94f385eb2133 is 50, key is 
test_row_0/C:col10/1732713852908/Put/seqid=0 2024-11-27T13:24:14,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741990_1166 (size=12001) 2024-11-27T13:24:14,462 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/8fc7d09392104bb5992a94f385eb2133 2024-11-27T13:24:14,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1d7245475db841f0871f2b77681f09be as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be 2024-11-27T13:24:14,474 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be, entries=150, sequenceid=94, filesize=30.2 K 2024-11-27T13:24:14,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d33e028000bf4af298bf43f6a5dc4864 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864 2024-11-27T13:24:14,488 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864, entries=150, sequenceid=94, filesize=11.7 K 2024-11-27T13:24:14,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/8fc7d09392104bb5992a94f385eb2133 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133 2024-11-27T13:24:14,495 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133, entries=150, sequenceid=94, filesize=11.7 K 2024-11-27T13:24:14,496 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 
{event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 549ms, sequenceid=94, compaction requested=true 2024-11-27T13:24:14,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:14,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:14,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-27T13:24:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-27T13:24:14,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-27T13:24:14,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4740 sec 2024-11-27T13:24:14,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.4810 sec 2024-11-27T13:24:14,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:14,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:14,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:14,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112795f860b9af10482982ba32134b741f7b_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:14,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741991_1167 (size=12154) 2024-11-27T13:24:14,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713914976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713914977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713914977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713914979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:14,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:14,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713914980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,093 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:15,099 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112795f860b9af10482982ba32134b741f7b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112795f860b9af10482982ba32134b741f7b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:15,100 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7fe3cad95d634ef2bad8e6477d4d4136, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7fe3cad95d634ef2bad8e6477d4d4136 is 175, key is test_row_0/A:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:15,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741992_1168 (size=30955) 2024-11-27T13:24:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-27T13:24:15,127 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-27T13:24:15,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-11-27T13:24:15,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-27T13:24:15,130 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:15,131 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:15,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:15,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:15,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713915280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713915280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713915281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,282 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:15,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T13:24:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713915284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713915285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:15,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:15,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T13:24:15,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:15,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:15,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,507 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7fe3cad95d634ef2bad8e6477d4d4136 2024-11-27T13:24:15,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/243abb24ac234f8180d2a14e6139142d is 50, key is test_row_0/B:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:15,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741993_1169 (size=12001) 2024-11-27T13:24:15,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/243abb24ac234f8180d2a14e6139142d 2024-11-27T13:24:15,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d947605031944860a7277fa4edd178e6 is 50, key is test_row_0/C:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:15,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741994_1170 (size=12001) 2024-11-27T13:24:15,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d947605031944860a7277fa4edd178e6 2024-11-27T13:24:15,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7fe3cad95d634ef2bad8e6477d4d4136 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136 2024-11-27T13:24:15,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136, entries=150, sequenceid=120, filesize=30.2 K 2024-11-27T13:24:15,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/243abb24ac234f8180d2a14e6139142d as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d 2024-11-27T13:24:15,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d, entries=150, sequenceid=120, filesize=11.7 K 2024-11-27T13:24:15,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:15,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T13:24:15,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:15,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:15,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d947605031944860a7277fa4edd178e6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6 2024-11-27T13:24:15,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6, entries=150, sequenceid=120, filesize=11.7 K 2024-11-27T13:24:15,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 950ms, sequenceid=120, compaction requested=true 2024-11-27T13:24:15,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:15,611 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:15,611 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:15,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:15,613 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:15,613 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:15,613 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in 
TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,614 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=121.0 K 2024-11-27T13:24:15,614 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,614 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136] 2024-11-27T13:24:15,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:15,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:15,614 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:15,615 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/abed73b3a75e4b7987866089fab30a56, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=47.0 K 2024-11-27T13:24:15,615 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting abed73b3a75e4b7987866089fab30a56, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:15,616 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 349d64cfa0d64f76b010a3e4df30982f, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:15,616 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ef4185db13a84447b7a4bbabc04e0bee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732713851771 2024-11-27T13:24:15,616 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8e05d7040974ad0854877133bd34a36, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732713851771 2024-11-27T13:24:15,617 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d33e028000bf4af298bf43f6a5dc4864, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713852906 2024-11-27T13:24:15,617 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d7245475db841f0871f2b77681f09be, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713852906 2024-11-27T13:24:15,618 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fe3cad95d634ef2bad8e6477d4d4136, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:15,618 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 243abb24ac234f8180d2a14e6139142d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:15,632 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,647 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#147 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:15,647 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/c80377eb1f174fbd8e21ffd91966cf8d is 50, key is test_row_0/B:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:15,657 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127580e0a319f9c4253a9235baec90a0455_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,661 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127580e0a319f9c4253a9235baec90a0455_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,662 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127580e0a319f9c4253a9235baec90a0455_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741996_1172 (size=4469) 2024-11-27T13:24:15,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741995_1171 (size=12241) 2024-11-27T13:24:15,699 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/c80377eb1f174fbd8e21ffd91966cf8d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/c80377eb1f174fbd8e21ffd91966cf8d 2024-11-27T13:24:15,729 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into c80377eb1f174fbd8e21ffd91966cf8d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:15,729 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:15,729 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=12, startTime=1732713855611; duration=0sec 2024-11-27T13:24:15,729 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:15,729 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:15,729 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:15,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:15,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:15,733 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:15,733 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3eb000482946438c95778fb1d27d2ce5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=47.0 K 2024-11-27T13:24:15,734 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eb000482946438c95778fb1d27d2ce5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713851134 2024-11-27T13:24:15,734 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d7378fa42b445c183d42774efe083c3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732713851771 2024-11-27T13:24:15,735 DEBUG 
[RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fc7d09392104bb5992a94f385eb2133, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713852906 2024-11-27T13:24:15,735 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d947605031944860a7277fa4edd178e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:15,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:15,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-27T13:24:15,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:15,753 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:15,776 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#148 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:15,777 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/3c1635ac363a44baac1be33b4066edd7 is 50, key is test_row_0/C:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:15,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:15,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:15,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112770e59a6370e94ea999eef48c36112b2b_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713854668/Put/seqid=0 2024-11-27T13:24:15,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741997_1173 (size=12241) 2024-11-27T13:24:15,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713915815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713915815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713915817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,824 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/3c1635ac363a44baac1be33b4066edd7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3c1635ac363a44baac1be33b4066edd7 2024-11-27T13:24:15,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713915817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713915818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,836 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 3c1635ac363a44baac1be33b4066edd7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:15,836 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:15,836 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=12, startTime=1732713855611; duration=0sec 2024-11-27T13:24:15,836 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:15,836 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:15,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741998_1174 (size=12204) 2024-11-27T13:24:15,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:15,856 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112770e59a6370e94ea999eef48c36112b2b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112770e59a6370e94ea999eef48c36112b2b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:15,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/06b132a09fe64fa38857b16d5cf355d1, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/06b132a09fe64fa38857b16d5cf355d1 is 175, key is test_row_0/A:col10/1732713854668/Put/seqid=0 2024-11-27T13:24:15,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741999_1175 (size=31005) 2024-11-27T13:24:15,895 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/06b132a09fe64fa38857b16d5cf355d1 2024-11-27T13:24:15,923 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713915922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713915922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2af1977a522a4c23b55575ef4c894ba4 is 50, key is test_row_0/B:col10/1732713854668/Put/seqid=0 2024-11-27T13:24:15,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713915925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713915927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:15,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713915930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:15,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742000_1176 (size=12051) 2024-11-27T13:24:15,965 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2af1977a522a4c23b55575ef4c894ba4 2024-11-27T13:24:15,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/469bbc9eb26747909bf48e9064ed5211 is 50, key is test_row_0/C:col10/1732713854668/Put/seqid=0 2024-11-27T13:24:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742001_1177 (size=12051) 2024-11-27T13:24:16,083 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#146 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:16,084 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c76814b1137a40abb419b3913db23d49 is 175, key is test_row_0/A:col10/1732713854045/Put/seqid=0 2024-11-27T13:24:16,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742002_1178 (size=31195) 2024-11-27T13:24:16,095 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c76814b1137a40abb419b3913db23d49 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49 2024-11-27T13:24:16,100 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into c76814b1137a40abb419b3913db23d49(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:16,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:16,101 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=12, startTime=1732713855611; duration=0sec 2024-11-27T13:24:16,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:16,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:16,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713916125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713916128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713916131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713916132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713916133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:16,387 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/469bbc9eb26747909bf48e9064ed5211 2024-11-27T13:24:16,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/06b132a09fe64fa38857b16d5cf355d1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1 2024-11-27T13:24:16,400 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1, entries=150, sequenceid=131, filesize=30.3 K 2024-11-27T13:24:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2af1977a522a4c23b55575ef4c894ba4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4 2024-11-27T13:24:16,407 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4, entries=150, sequenceid=131, filesize=11.8 K 2024-11-27T13:24:16,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/469bbc9eb26747909bf48e9064ed5211 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211 2024-11-27T13:24:16,415 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211, entries=150, sequenceid=131, filesize=11.8 K 2024-11-27T13:24:16,416 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 664ms, sequenceid=131, compaction requested=false 2024-11-27T13:24:16,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:16,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:16,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-27T13:24:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-27T13:24:16,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-27T13:24:16,419 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2860 sec 2024-11-27T13:24:16,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.2920 sec 2024-11-27T13:24:16,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:16,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T13:24:16,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:16,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:16,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:16,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:16,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:16,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:16,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713916437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713916437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713916438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e68732296cf44de19154b6a645afee1c_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:16,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713916439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713916440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742003_1179 (size=14794) 2024-11-27T13:24:16,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713916542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713916544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713916544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713916545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713916745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713916747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713916747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713916748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:16,876 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:16,881 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e68732296cf44de19154b6a645afee1c_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e68732296cf44de19154b6a645afee1c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:16,882 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1367fdb477d74103b112a5987d2c4350, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:16,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1367fdb477d74103b112a5987d2c4350 is 175, key is test_row_0/A:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:16,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742004_1180 (size=39749) 2024-11-27T13:24:16,890 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, 
memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1367fdb477d74103b112a5987d2c4350 2024-11-27T13:24:16,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3e8e82b435c94292ae3bd6accc21e068 is 50, key is test_row_0/B:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:16,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742005_1181 (size=12151) 2024-11-27T13:24:16,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:16,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713916942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713917050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713917050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713917052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713917054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-27T13:24:17,235 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-27T13:24:17,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:17,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-27T13:24:17,238 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:17,239 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:17,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:17,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3e8e82b435c94292ae3bd6accc21e068 2024-11-27T13:24:17,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/cbba95b4008946e48d5412ae74763e96 is 50, key is test_row_0/C:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:17,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742006_1182 (size=12151) 
2024-11-27T13:24:17,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/cbba95b4008946e48d5412ae74763e96 2024-11-27T13:24:17,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/1367fdb477d74103b112a5987d2c4350 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350 2024-11-27T13:24:17,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:17,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350, entries=200, sequenceid=160, filesize=38.8 K 2024-11-27T13:24:17,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3e8e82b435c94292ae3bd6accc21e068 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068 2024-11-27T13:24:17,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068, entries=150, sequenceid=160, filesize=11.9 K 2024-11-27T13:24:17,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/cbba95b4008946e48d5412ae74763e96 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96 2024-11-27T13:24:17,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96, entries=150, sequenceid=160, filesize=11.9 K 2024-11-27T13:24:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 933ms, sequenceid=160, compaction requested=true 2024-11-27T13:24:17,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:17,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,364 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): 
Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:17,364 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:17,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,365 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101949 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:17,365 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:17,365 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:17,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,365 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=99.6 K 2024-11-27T13:24:17,365 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:17,365 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:17,365 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350] 2024-11-27T13:24:17,366 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:17,366 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:17,366 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/c80377eb1f174fbd8e21ffd91966cf8d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=35.6 K 2024-11-27T13:24:17,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,366 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c76814b1137a40abb419b3913db23d49, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:17,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,367 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c80377eb1f174fbd8e21ffd91966cf8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:17,367 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06b132a09fe64fa38857b16d5cf355d1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732713854663 2024-11-27T13:24:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,367 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2af1977a522a4c23b55575ef4c894ba4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732713854663 2024-11-27T13:24:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,368 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 3e8e82b435c94292ae3bd6accc21e068, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:17,368 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1367fdb477d74103b112a5987d2c4350, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,382 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:17,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,391 INFO 
[RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#156 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:17,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,391 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:17,391 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d2e09f2887ab47899f90620874639413 is 50, key is test_row_0/B:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-27T13:24:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:17,392 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:17,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:17,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:17,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T13:24:17,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T13:24:17,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,400 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112764b51fe4af3341c193d24fbd538fedd7_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:17,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,404 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112764b51fe4af3341c193d24fbd538fedd7_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:17,404 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112764b51fe4af3341c193d24fbd538fedd7_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:17,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to 
blk_1073742007_1183 (size=12493) 2024-11-27T13:24:17,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a7af6ea882404cad80481537666f9f53_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_1/A:col10/1732713856436/Put/seqid=0 2024-11-27T13:24:17,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,411 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,413 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d2e09f2887ab47899f90620874639413 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d2e09f2887ab47899f90620874639413 2024-11-27T13:24:17,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,422 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into d2e09f2887ab47899f90620874639413(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:17,422 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:17,422 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713857364; duration=0sec 2024-11-27T13:24:17,422 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:17,422 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:17,422 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:17,423 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:17,424 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:17,424 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:17,424 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3c1635ac363a44baac1be33b4066edd7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=35.6 K 2024-11-27T13:24:17,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c1635ac363a44baac1be33b4066edd7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732713854045 2024-11-27T13:24:17,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 469bbc9eb26747909bf48e9064ed5211, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732713854663 2024-11-27T13:24:17,428 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cbba95b4008946e48d5412ae74763e96, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742008_1184 (size=4469) 2024-11-27T13:24:17,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742009_1185 (size=9814) 2024-11-27T13:24:17,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,460 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a7af6ea882404cad80481537666f9f53_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a7af6ea882404cad80481537666f9f53_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:17,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/ffd8e18a0c494dbf99f81b6e9a04d3b0, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:17,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 is 175, key is test_row_1/A:col10/1732713856436/Put/seqid=0 2024-11-27T13:24:17,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,469 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#158 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,470 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/1f84a0ad2b994d6ea7a27e98c6242013 is 50, key is test_row_0/C:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,474 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,479 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,484 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,493 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,499 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,504 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,509 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,514 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742010_1186 (size=22461) 2024-11-27T13:24:17,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742011_1187 (size=12493) 2024-11-27T13:24:17,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:17,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,544 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/1f84a0ad2b994d6ea7a27e98c6242013 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1f84a0ad2b994d6ea7a27e98c6242013 2024-11-27T13:24:17,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,547 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:24:17,551 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 1f84a0ad2b994d6ea7a27e98c6242013(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute.
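The INFO entry above records the long-compactions thread finishing a 3-file compaction of the C store of region 3e8c8da06d0d53cf44a4ff2a4693ab7d. In this test the compaction was queued internally by the region server's CompactSplit thread; the snippet below is only a minimal sketch of how the same kind of single-family compaction could be requested explicitly through the public Admin API (the table name and family come from the log, everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestFamilyCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a major compaction for just the C family of every region of the table.
      admin.majorCompact(table, Bytes.toBytes("C"));
      // Wait until the servers report no compaction running or queued for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}

Once the request has drained, getCompactionState returns NONE, which corresponds to the compactionQueue=(longCompactions=0:shortCompactions=0) status logged just below.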
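The many storefiletracker.StoreFileTrackerFactory(122) DEBUG entries throughout this section are the factory resolving a tracker class for each store it touches; with no explicit choice configured it falls back to DefaultStoreFileTracker, which is what every one of these lines shows. As a hedged sketch of how an implementation would be selected explicitly, assuming the hbase.store.file-tracker.impl property and the DEFAULT/FILE option names documented for recent HBase releases (neither appears in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSelection {
  // Assumed property name from recent HBase documentation; it is not shown in this log.
  static final String TRACKER_IMPL = "hbase.store.file-tracker.impl";

  public static void main(String[] args) {
    // Cluster-wide default: leaving this unset (or set to "DEFAULT") yields
    // DefaultStoreFileTracker, matching the DEBUG lines above.
    Configuration conf = HBaseConfiguration.create();
    conf.set(TRACKER_IMPL, "DEFAULT");

    // Per-table override, e.g. the file-based tracker ("FILE"), applied via the descriptor.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue(TRACKER_IMPL, "FILE")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
    System.out.println(td);
  }
}

Leaving the property unset everywhere is equivalent to the behaviour recorded in this log.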
2024-11-27T13:24:17,551 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:17,551 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=13, startTime=1732713857364; duration=0sec 2024-11-27T13:24:17,551 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:17,551 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:17,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
as already flushing 2024-11-27T13:24:17,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:17,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:17,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713917641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713917641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713917642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713917642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713917744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713917746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713917747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713917748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:17,847 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#155 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:17,848 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/191c84358a6f4d19b83afb40746cfff7 is 175, key is test_row_0/A:col10/1732713855806/Put/seqid=0 2024-11-27T13:24:17,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742012_1188 (size=31447) 2024-11-27T13:24:17,918 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 2024-11-27T13:24:17,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3abb7bdc2efa421583704435d1d21f9b is 50, key is test_row_1/B:col10/1732713856436/Put/seqid=0 2024-11-27T13:24:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742013_1189 (size=9757) 2024-11-27T13:24:17,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713917946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713917950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713917950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713917951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:17,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:17,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713917952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,196 INFO [master/a0541979a851:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-27T13:24:18,196 INFO [master/a0541979a851:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-27T13:24:18,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713918250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713918252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713918254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713918254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,260 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/191c84358a6f4d19b83afb40746cfff7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7 2024-11-27T13:24:18,266 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 191c84358a6f4d19b83afb40746cfff7(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:18,266 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:18,266 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713857364; duration=0sec 2024-11-27T13:24:18,266 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:18,266 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:18,336 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3abb7bdc2efa421583704435d1d21f9b 2024-11-27T13:24:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/65e1873866af4a4392ff260cf2636ef2 is 50, key is test_row_1/C:col10/1732713856436/Put/seqid=0 2024-11-27T13:24:18,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742014_1190 (size=9757) 2024-11-27T13:24:18,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713918752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713918757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713918757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:18,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713918760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:18,763 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/65e1873866af4a4392ff260cf2636ef2 2024-11-27T13:24:18,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 2024-11-27T13:24:18,775 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0, entries=100, sequenceid=169, filesize=21.9 K 2024-11-27T13:24:18,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/3abb7bdc2efa421583704435d1d21f9b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b 2024-11-27T13:24:18,782 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b, entries=100, sequenceid=169, filesize=9.5 K 2024-11-27T13:24:18,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/65e1873866af4a4392ff260cf2636ef2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2 2024-11-27T13:24:18,796 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2, entries=100, sequenceid=169, filesize=9.5 K 2024-11-27T13:24:18,797 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1405ms, sequenceid=169, compaction requested=false 2024-11-27T13:24:18,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:18,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:18,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-27T13:24:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-27T13:24:18,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-27T13:24:18,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5580 sec 2024-11-27T13:24:18,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.5640 sec 2024-11-27T13:24:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-27T13:24:19,346 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-27T13:24:19,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:19,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-27T13:24:19,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:19,348 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:19,349 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:19,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:19,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:19,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:19,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-27T13:24:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:19,502 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T13:24:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:19,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b9367fbc6d7e4c5185f56260cc674b46_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713857641/Put/seqid=0 2024-11-27T13:24:19,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742015_1191 
(size=12304) 2024-11-27T13:24:19,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:19,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:19,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:19,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713919779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713919800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713919800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713919800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713919903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713919903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713919903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713919904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:19,925 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b9367fbc6d7e4c5185f56260cc674b46_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b9367fbc6d7e4c5185f56260cc674b46_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:19,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/14a3cd94873a4031a9d687b68cda38e7, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:19,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/14a3cd94873a4031a9d687b68cda38e7 is 175, key is test_row_0/A:col10/1732713857641/Put/seqid=0 2024-11-27T13:24:19,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742016_1192 (size=31105) 2024-11-27T13:24:19,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:19,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:19,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713919959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:19,961 DEBUG [Thread-697 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:20,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713920107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713920108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713920109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713920109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,333 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/14a3cd94873a4031a9d687b68cda38e7 2024-11-27T13:24:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e8b17ca0f9104a5a8c0cc87315d7552b is 50, key is test_row_0/B:col10/1732713857641/Put/seqid=0 2024-11-27T13:24:20,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742017_1193 (size=12151) 2024-11-27T13:24:20,347 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e8b17ca0f9104a5a8c0cc87315d7552b 2024-11-27T13:24:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/86fc01d1e5f24c6eb4b6e34de347853d is 50, key is test_row_0/C:col10/1732713857641/Put/seqid=0 2024-11-27T13:24:20,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742018_1194 (size=12151) 2024-11-27T13:24:20,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713920411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713920411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713920414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713920416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:20,792 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/86fc01d1e5f24c6eb4b6e34de347853d 2024-11-27T13:24:20,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/14a3cd94873a4031a9d687b68cda38e7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7 2024-11-27T13:24:20,804 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7, entries=150, sequenceid=199, filesize=30.4 K 2024-11-27T13:24:20,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e8b17ca0f9104a5a8c0cc87315d7552b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b 2024-11-27T13:24:20,809 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b, entries=150, sequenceid=199, filesize=11.9 K 2024-11-27T13:24:20,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/86fc01d1e5f24c6eb4b6e34de347853d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d 2024-11-27T13:24:20,815 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d, entries=150, sequenceid=199, filesize=11.9 K 2024-11-27T13:24:20,816 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1314ms, sequenceid=199, compaction requested=true 2024-11-27T13:24:20,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:20,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:20,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-27T13:24:20,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-27T13:24:20,819 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-27T13:24:20,819 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4680 sec 2024-11-27T13:24:20,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4730 sec 2024-11-27T13:24:20,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:20,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:20,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:20,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:20,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:20,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:20,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:20,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:20,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b007ec6d810e43468bf3f0d329653d89_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:20,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713920944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713920946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713920946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:20,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713920948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:20,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742019_1195 (size=12304) 2024-11-27T13:24:20,955 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:20,960 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b007ec6d810e43468bf3f0d329653d89_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b007ec6d810e43468bf3f0d329653d89_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:20,961 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c09db2ae602848b895606421e412d088, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:20,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c09db2ae602848b895606421e412d088 is 175, key is test_row_0/A:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:20,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742020_1196 (size=31105) 2024-11-27T13:24:21,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713921050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713921051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713921051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713921051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713921254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713921254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713921254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713921255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,369 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c09db2ae602848b895606421e412d088 2024-11-27T13:24:21,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/5459a312736f4fc0948b96881aee9357 is 50, key is test_row_0/B:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:21,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742021_1197 (size=12151) 2024-11-27T13:24:21,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/5459a312736f4fc0948b96881aee9357 2024-11-27T13:24:21,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/bc680f33f71343b980d3fabff0724c4d is 50, key is test_row_0/C:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:21,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742022_1198 (size=12151) 
2024-11-27T13:24:21,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-27T13:24:21,453 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-27T13:24:21,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:21,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-27T13:24:21,456 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:21,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T13:24:21,456 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:21,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T13:24:21,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713921558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713921558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713921559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713921559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:21,607 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:21,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-27T13:24:21,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:21,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:21,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T13:24:21,760 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:21,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-27T13:24:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:21,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:21,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/bc680f33f71343b980d3fabff0724c4d 2024-11-27T13:24:21,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/c09db2ae602848b895606421e412d088 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088 2024-11-27T13:24:21,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088, entries=150, sequenceid=211, filesize=30.4 K 2024-11-27T13:24:21,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/5459a312736f4fc0948b96881aee9357 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357 2024-11-27T13:24:21,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357, entries=150, sequenceid=211, filesize=11.9 K 2024-11-27T13:24:21,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/bc680f33f71343b980d3fabff0724c4d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d 2024-11-27T13:24:21,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d, entries=150, sequenceid=211, filesize=11.9 K 2024-11-27T13:24:21,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 914ms, sequenceid=211, compaction requested=true 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:21,834 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:21,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:21,834 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:21,835 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:21,835 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 116118 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:21,835 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:21,835 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:21,835 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,836 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:21,836 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d2e09f2887ab47899f90620874639413, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=45.5 K 2024-11-27T13:24:21,836 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=113.4 K 2024-11-27T13:24:21,836 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:21,836 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088] 2024-11-27T13:24:21,836 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d2e09f2887ab47899f90620874639413, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:21,837 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191c84358a6f4d19b83afb40746cfff7, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:21,837 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3abb7bdc2efa421583704435d1d21f9b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732713856436 2024-11-27T13:24:21,837 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffd8e18a0c494dbf99f81b6e9a04d3b0, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732713856436 2024-11-27T13:24:21,838 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e8b17ca0f9104a5a8c0cc87315d7552b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732713857637 2024-11-27T13:24:21,838 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14a3cd94873a4031a9d687b68cda38e7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732713857637 2024-11-27T13:24:21,838 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5459a312736f4fc0948b96881aee9357, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732713860916 2024-11-27T13:24:21,838 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c09db2ae602848b895606421e412d088, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732713860916 2024-11-27T13:24:21,848 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:21,851 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#168 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:21,851 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d82f99f92e094f6e866870c8e5b48545 is 50, key is test_row_0/B:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:21,852 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411272e5f63d3a34b403c856ca3861ffe12e6_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:21,854 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411272e5f63d3a34b403c856ca3861ffe12e6_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:21,854 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272e5f63d3a34b403c856ca3861ffe12e6_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742024_1200 (size=4469) 2024-11-27T13:24:21,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742023_1199 (size=12629) 2024-11-27T13:24:21,914 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:21,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-27T13:24:21,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:21,915 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:21,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:21,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c20dc189b3dd41c7a33dbf45fe75ccea_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713860945/Put/seqid=0 2024-11-27T13:24:21,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742025_1201 (size=12304) 2024-11-27T13:24:21,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:21,947 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c20dc189b3dd41c7a33dbf45fe75ccea_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c20dc189b3dd41c7a33dbf45fe75ccea_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:21,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/3af7ca8c2fcd4e6d85855e1daceeb082, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:21,950 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/3af7ca8c2fcd4e6d85855e1daceeb082 is 175, key is test_row_0/A:col10/1732713860945/Put/seqid=0 2024-11-27T13:24:21,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742026_1202 (size=31105) 2024-11-27T13:24:21,981 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/3af7ca8c2fcd4e6d85855e1daceeb082 2024-11-27T13:24:22,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/11ad3e2d258545f3950a22b8b1ef5538 is 50, key is test_row_0/B:col10/1732713860945/Put/seqid=0 2024-11-27T13:24:22,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742027_1203 (size=12151) 2024-11-27T13:24:22,008 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/11ad3e2d258545f3950a22b8b1ef5538 2024-11-27T13:24:22,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d5206f539a8345cbb74eaa5335c749d8 is 50, key is test_row_0/C:col10/1732713860945/Put/seqid=0 2024-11-27T13:24:22,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742028_1204 (size=12151) 2024-11-27T13:24:22,039 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d5206f539a8345cbb74eaa5335c749d8 2024-11-27T13:24:22,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/3af7ca8c2fcd4e6d85855e1daceeb082 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082 2024-11-27T13:24:22,050 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082, entries=150, sequenceid=236, filesize=30.4 K 2024-11-27T13:24:22,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/11ad3e2d258545f3950a22b8b1ef5538 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538 2024-11-27T13:24:22,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,056 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538, entries=150, sequenceid=236, filesize=11.9 K 2024-11-27T13:24:22,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/d5206f539a8345cbb74eaa5335c749d8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8 2024-11-27T13:24:22,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,061 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,066 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,066 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8, entries=150, sequenceid=236, filesize=11.9 K 2024-11-27T13:24:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,067 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 152ms, sequenceid=236, compaction requested=true 2024-11-27T13:24:22,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:22,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-27T13:24:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-27T13:24:22,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-27T13:24:22,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 614 msec 2024-11-27T13:24:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 618 msec 2024-11-27T13:24:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:22,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:22,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:22,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:22,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:22,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112742354e9b12774f7abdabf97ed9324411_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742030_1206 (size=24758) 2024-11-27T13:24:22,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T13:24:22,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713922220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713922220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713922222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713922222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,264 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#167 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:22,266 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/73064a94fed4452981fd166e1cfe352f is 175, key is test_row_0/A:col10/1732713860916/Put/seqid=0 2024-11-27T13:24:22,272 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/d82f99f92e094f6e866870c8e5b48545 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d82f99f92e094f6e866870c8e5b48545 2024-11-27T13:24:22,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742031_1207 (size=31583) 2024-11-27T13:24:22,281 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into d82f99f92e094f6e866870c8e5b48545(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:22,281 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,281 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=12, startTime=1732713861834; duration=0sec 2024-11-27T13:24:22,284 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/73064a94fed4452981fd166e1cfe352f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f 2024-11-27T13:24:22,284 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:22,285 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:22,285 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:24:22,289 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 58703 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:24:22,289 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:22,289 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:22,289 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1f84a0ad2b994d6ea7a27e98c6242013, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=57.3 K 2024-11-27T13:24:22,290 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f84a0ad2b994d6ea7a27e98c6242013, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713855806 2024-11-27T13:24:22,290 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 65e1873866af4a4392ff260cf2636ef2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732713856436 2024-11-27T13:24:22,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 86fc01d1e5f24c6eb4b6e34de347853d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732713857637 2024-11-27T13:24:22,292 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bc680f33f71343b980d3fabff0724c4d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732713860916 2024-11-27T13:24:22,292 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 73064a94fed4452981fd166e1cfe352f(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:22,292 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,292 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=12, startTime=1732713861834; duration=0sec 2024-11-27T13:24:22,292 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d5206f539a8345cbb74eaa5335c749d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732713860942 2024-11-27T13:24:22,292 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:22,292 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:22,308 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#173 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:22,308 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6048aeb67e1a4ec290c1f707b89b5fea is 50, key is test_row_0/C:col10/1732713860945/Put/seqid=0 2024-11-27T13:24:22,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742032_1208 (size=12663) 2024-11-27T13:24:22,326 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6048aeb67e1a4ec290c1f707b89b5fea as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6048aeb67e1a4ec290c1f707b89b5fea 2024-11-27T13:24:22,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713922324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713922330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713922331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713922331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,335 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 6048aeb67e1a4ec290c1f707b89b5fea(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:22,335 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,335 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=11, startTime=1732713861834; duration=0sec 2024-11-27T13:24:22,335 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:22,335 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:22,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713922530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713922534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713922535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713922535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-27T13:24:22,559 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-27T13:24:22,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-27T13:24:22,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:22,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T13:24:22,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:22,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:22,595 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:22,600 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112742354e9b12774f7abdabf97ed9324411_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742354e9b12774f7abdabf97ed9324411_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:22,601 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/98fe953c80aa4170b3f8bae096dceff5, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:22,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/98fe953c80aa4170b3f8bae096dceff5 is 175, key is test_row_0/A:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742029_1205 (size=74395) 2024-11-27T13:24:22,605 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/98fe953c80aa4170b3f8bae096dceff5 2024-11-27T13:24:22,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/45a1ac3df3684176ab8b02115234101f is 50, key is test_row_0/B:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742033_1209 (size=12151) 2024-11-27T13:24:22,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/45a1ac3df3684176ab8b02115234101f 2024-11-27T13:24:22,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/2b1b9ce8a4544e1d8892553697540f65 is 50, key is test_row_0/C:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742034_1210 (size=12151) 2024-11-27T13:24:22,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/2b1b9ce8a4544e1d8892553697540f65 2024-11-27T13:24:22,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/98fe953c80aa4170b3f8bae096dceff5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5 2024-11-27T13:24:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=61 2024-11-27T13:24:22,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5, entries=400, sequenceid=247, filesize=72.7 K 2024-11-27T13:24:22,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/45a1ac3df3684176ab8b02115234101f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f 2024-11-27T13:24:22,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f, entries=150, sequenceid=247, filesize=11.9 K 2024-11-27T13:24:22,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/2b1b9ce8a4544e1d8892553697540f65 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65 2024-11-27T13:24:22,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65, entries=150, sequenceid=247, filesize=11.9 K 2024-11-27T13:24:22,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 534ms, sequenceid=247, compaction requested=true 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:22,680 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:22,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:22,680 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:22,681 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137083 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:22,681 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:22,682 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:22,682 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=133.9 K 2024-11-27T13:24:22,682 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:22,682 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5] 2024-11-27T13:24:22,682 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73064a94fed4452981fd166e1cfe352f, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732713860916 2024-11-27T13:24:22,682 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:22,683 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:22,683 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:22,683 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d82f99f92e094f6e866870c8e5b48545, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.1 K 2024-11-27T13:24:22,683 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3af7ca8c2fcd4e6d85855e1daceeb082, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732713860942 2024-11-27T13:24:22,684 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d82f99f92e094f6e866870c8e5b48545, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732713860916 2024-11-27T13:24:22,684 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98fe953c80aa4170b3f8bae096dceff5, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732713862098 2024-11-27T13:24:22,684 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 11ad3e2d258545f3950a22b8b1ef5538, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732713860942 2024-11-27T13:24:22,685 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
45a1ac3df3684176ab8b02115234101f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732713862101 2024-11-27T13:24:22,695 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:22,706 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#177 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:22,707 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/64db691230124403b68d3a825e64b766 is 50, key is test_row_0/B:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,716 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:22,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-27T13:24:22,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:22,716 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:22,717 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127d1db5f2b34534a8eb6eca2fd56d8c051_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:22,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:22,723 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127d1db5f2b34534a8eb6eca2fd56d8c051_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:22,724 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d1db5f2b34534a8eb6eca2fd56d8c051_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:22,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742035_1211 (size=12731) 2024-11-27T13:24:22,751 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/64db691230124403b68d3a825e64b766 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/64db691230124403b68d3a825e64b766 2024-11-27T13:24:22,758 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 
64db691230124403b68d3a825e64b766(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:22,758 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:22,758 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713862680; duration=0sec 2024-11-27T13:24:22,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:22,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:22,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-27T13:24:22,760 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-27T13:24:22,760 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-27T13:24:22,760 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. because compaction request was cancelled 2024-11-27T13:24:22,760 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:22,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270c00ee67634948379ab36c50dd8cc70d_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713862219/Put/seqid=0 2024-11-27T13:24:22,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742036_1212 (size=4469) 2024-11-27T13:24:22,776 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#176 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:22,776 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a1cd84aa6ae14ff3818ef8036ff2056c is 175, key is test_row_0/A:col10/1732713862101/Put/seqid=0 2024-11-27T13:24:22,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742038_1214 (size=31685) 2024-11-27T13:24:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742037_1213 (size=12454) 2024-11-27T13:24:22,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:22,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713922843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713922843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713922844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713922845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T13:24:22,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713922946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713922946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713922948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:22,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:22,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713922948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713923149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713923149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713923150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713923150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T13:24:23,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,192 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270c00ee67634948379ab36c50dd8cc70d_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270c00ee67634948379ab36c50dd8cc70d_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:23,193 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a1cd84aa6ae14ff3818ef8036ff2056c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c 2024-11-27T13:24:23,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/636ba085d31a4791becc3cde674e73c5, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:23,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/636ba085d31a4791becc3cde674e73c5 is 175, key is test_row_0/A:col10/1732713862219/Put/seqid=0 2024-11-27T13:24:23,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742039_1215 (size=31255) 2024-11-27T13:24:23,203 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/636ba085d31a4791becc3cde674e73c5 2024-11-27T13:24:23,204 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into a1cd84aa6ae14ff3818ef8036ff2056c(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:23,204 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:23,204 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713862680; duration=0sec 2024-11-27T13:24:23,205 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:23,205 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:23,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/f269fe72bbbf4045957320378f88da5e is 50, key is test_row_0/B:col10/1732713862219/Put/seqid=0 2024-11-27T13:24:23,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742040_1216 (size=12301) 2024-11-27T13:24:23,256 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/f269fe72bbbf4045957320378f88da5e 2024-11-27T13:24:23,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/df5ea92bd71a47a5b0a62df99d93c555 is 50, key is 
test_row_0/C:col10/1732713862219/Put/seqid=0 2024-11-27T13:24:23,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742041_1217 (size=12301) 2024-11-27T13:24:23,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713923452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:23,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T13:24:23,675 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/df5ea92bd71a47a5b0a62df99d93c555 2024-11-27T13:24:23,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/636ba085d31a4791becc3cde674e73c5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5 2024-11-27T13:24:23,690 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5, entries=150, sequenceid=276, filesize=30.5 K 2024-11-27T13:24:23,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/f269fe72bbbf4045957320378f88da5e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e 2024-11-27T13:24:23,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,699 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e, entries=150, sequenceid=276, filesize=12.0 K 2024-11-27T13:24:23,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/df5ea92bd71a47a5b0a62df99d93c555 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555 2024-11-27T13:24:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,708 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555, entries=150, sequenceid=276, filesize=12.0 K 2024-11-27T13:24:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,709 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 993ms, sequenceid=276, compaction requested=true 2024-11-27T13:24:23,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:23,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:23,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-27T13:24:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-27T13:24:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-27T13:24:23,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1480 sec 2024-11-27T13:24:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,713 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.1520 sec 2024-11-27T13:24:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,716 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,720 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,724 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,727 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,731 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,734 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,737 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,750 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,833 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,838 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,843 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,848 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,860 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,864 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,885 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,890 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,893 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,899 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,912 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,916 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,923 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,930 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,934 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,938 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,950 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:23,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:23,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:23,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:23,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:23,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:23,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:23,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,966 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,972 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278637e169ca9a46068058492be36fb85c_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:23,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:23,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742042_1218 (size=14994) 2024-11-27T13:24:23,999 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:24,008 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278637e169ca9a46068058492be36fb85c_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278637e169ca9a46068058492be36fb85c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:24,010 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/15bfc3e72548405b8e031f26e5132634, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/15bfc3e72548405b8e031f26e5132634 is 175, key is test_row_0/A:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742043_1219 (size=39949) 2024-11-27T13:24:24,021 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/15bfc3e72548405b8e031f26e5132634 2024-11-27T13:24:24,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a54b14b4589449158b7ee2d36ac5794c is 50, key is test_row_0/B:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742044_1220 (size=12301) 2024-11-27T13:24:24,050 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a54b14b4589449158b7ee2d36ac5794c 2024-11-27T13:24:24,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/524f628d0a05472f95e9b97cafa35e78 is 50, key is test_row_0/C:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,072 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742045_1221 (size=12301) 2024-11-27T13:24:24,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/524f628d0a05472f95e9b97cafa35e78 2024-11-27T13:24:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/15bfc3e72548405b8e031f26e5132634 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634 2024-11-27T13:24:24,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634, entries=200, sequenceid=290, filesize=39.0 K 2024-11-27T13:24:24,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a54b14b4589449158b7ee2d36ac5794c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c 2024-11-27T13:24:24,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c, entries=150, sequenceid=290, filesize=12.0 K 2024-11-27T13:24:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/524f628d0a05472f95e9b97cafa35e78 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78 2024-11-27T13:24:24,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78, entries=150, sequenceid=290, filesize=12.0 K 2024-11-27T13:24:24,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 539ms, sequenceid=290, compaction requested=true 2024-11-27T13:24:24,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:24,501 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:24,501 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:24,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:24,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:24,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:24,502 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:24,502 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=100.5 K 2024-11-27T13:24:24,502 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:24,502 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634] 2024-11-27T13:24:24,503 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:24,503 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:24,503 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:24,503 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/64db691230124403b68d3a825e64b766, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.5 K 2024-11-27T13:24:24,503 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1cd84aa6ae14ff3818ef8036ff2056c, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732713862101 2024-11-27T13:24:24,504 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 64db691230124403b68d3a825e64b766, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732713862101 2024-11-27T13:24:24,504 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 636ba085d31a4791becc3cde674e73c5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732713862218 2024-11-27T13:24:24,504 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f269fe72bbbf4045957320378f88da5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732713862218 2024-11-27T13:24:24,505 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15bfc3e72548405b8e031f26e5132634, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:24,505 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a54b14b4589449158b7ee2d36ac5794c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:24,514 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#184 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:24,514 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/96e3e8af8cac4119b62b3cbf5a1ef5bf is 50, key is test_row_0/B:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,516 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,529 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411274f5d10e3d3a742e9b3f59d3de5de3291_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,531 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411274f5d10e3d3a742e9b3f59d3de5de3291_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,531 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274f5d10e3d3a742e9b3f59d3de5de3291_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742046_1222 (size=12983) 2024-11-27T13:24:24,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742047_1223 (size=4469) 2024-11-27T13:24:24,565 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/96e3e8af8cac4119b62b3cbf5a1ef5bf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/96e3e8af8cac4119b62b3cbf5a1ef5bf 2024-11-27T13:24:24,572 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 96e3e8af8cac4119b62b3cbf5a1ef5bf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:24,572 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:24,572 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713864501; duration=0sec 2024-11-27T13:24:24,572 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:24,573 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:24,573 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:24,574 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:24,574 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:24,574 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:24,574 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6048aeb67e1a4ec290c1f707b89b5fea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=48.3 K 2024-11-27T13:24:24,575 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6048aeb67e1a4ec290c1f707b89b5fea, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732713860942 2024-11-27T13:24:24,576 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b1b9ce8a4544e1d8892553697540f65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732713862101 2024-11-27T13:24:24,576 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting df5ea92bd71a47a5b0a62df99d93c555, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=276, earliestPutTs=1732713862218 2024-11-27T13:24:24,576 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 524f628d0a05472f95e9b97cafa35e78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:24,598 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:24,598 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/87652677b3004779bae8af173d4301f1 is 50, key is test_row_0/C:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742048_1224 (size=12949) 2024-11-27T13:24:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:24,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:24:24,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:24,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:24,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:24,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:24,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:24,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:24,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273acbff049ee24a84844ee24c6dfe16f7_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713864622/Put/seqid=0 2024-11-27T13:24:24,637 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/87652677b3004779bae8af173d4301f1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/87652677b3004779bae8af173d4301f1 2024-11-27T13:24:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,643 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 87652677b3004779bae8af173d4301f1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:24,643 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:24,643 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=12, startTime=1732713864501; duration=0sec 2024-11-27T13:24:24,643 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:24,643 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:24,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742049_1225 (size=12454) 2024-11-27T13:24:24,654 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:24,659 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411273acbff049ee24a84844ee24c6dfe16f7_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273acbff049ee24a84844ee24c6dfe16f7_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:24,660 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/d0edb8b642324bca9872463a46e08bbf, store: [table=TestAcidGuarantees family=A 
region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:24,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/d0edb8b642324bca9872463a46e08bbf is 175, key is test_row_0/A:col10/1732713864622/Put/seqid=0 2024-11-27T13:24:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-27T13:24:24,669 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-27T13:24:24,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-27T13:24:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:24,673 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742050_1226 (size=31255) 2024-11-27T13:24:24,674 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:24,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:24,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:24,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:24,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:24,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:24,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:24,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:24,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:24,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:24,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713924943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713924943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713924944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713924947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713924948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:24,963 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#185 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:24,964 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/f07c70da06f946c38480b9b5f935fb65 is 175, key is test_row_0/A:col10/1732713863961/Put/seqid=0 2024-11-27T13:24:24,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742051_1227 (size=31937) 2024-11-27T13:24:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:24,976 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/f07c70da06f946c38480b9b5f935fb65 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65 2024-11-27T13:24:24,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:24,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:24,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:24,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:24,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:24,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:24,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
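[Editor's note, not part of the log] The pid=63 FlushTableProcedure and its pid=64 FlushRegionProcedure above keep failing with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still writing out the region's stores; the master records the remote failure and redispatches until the in-progress flush drains. A minimal, hypothetical client-side sketch of the kind of request that starts such a procedure is below — the table name is taken from the log, the connection setup is assumed, and this is not the actual test driver code.

```java
// Illustrative sketch only: request an administrative flush of the table seen in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush of all regions of the table and waits for the master-side
      // procedure. While a region reports "already flushing", the region-server
      // callable fails (as logged) and the procedure framework retries it.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```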
2024-11-27T13:24:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:24,982 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into f07c70da06f946c38480b9b5f935fb65(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
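[Editor's note, not part of the log] The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") mean the region's memstore has grown past its blocking threshold, which HBase derives from the per-region flush size and the block multiplier. The sketch below only illustrates that relationship; the concrete values are assumptions chosen so that the product equals the 512 K limit seen in this run, which actually comes from the test's own configuration (not shown here).

```java
// Illustrative sketch only: the settings behind the "Over memstore limit" rejections.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes (128 KB is an assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // New writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (128 KB * 4 = 512 KB with these assumed values).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}
```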
2024-11-27T13:24:24,982 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:24,982 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713864500; duration=0sec 2024-11-27T13:24:24,982 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:24,982 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:25,074 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/d0edb8b642324bca9872463a46e08bbf 2024-11-27T13:24:25,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/480ff8d9b2574789840bb5cd296ec33c is 50, key is test_row_0/B:col10/1732713864622/Put/seqid=0 2024-11-27T13:24:25,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742052_1228 (size=12301) 2024-11-27T13:24:25,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/480ff8d9b2574789840bb5cd296ec33c 2024-11-27T13:24:25,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a189efb0f33d4e91a622feaa60efc5fc is 50, key is test_row_0/C:col10/1732713864622/Put/seqid=0 2024-11-27T13:24:25,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742053_1229 (size=12301) 2024-11-27T13:24:25,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a189efb0f33d4e91a622feaa60efc5fc 2024-11-27T13:24:25,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/d0edb8b642324bca9872463a46e08bbf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf 2024-11-27T13:24:25,119 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf, entries=150, sequenceid=317, filesize=30.5 K 2024-11-27T13:24:25,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/480ff8d9b2574789840bb5cd296ec33c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c 2024-11-27T13:24:25,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c, entries=150, sequenceid=317, filesize=12.0 K 2024-11-27T13:24:25,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a189efb0f33d4e91a622feaa60efc5fc as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc 2024-11-27T13:24:25,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc, entries=150, sequenceid=317, filesize=12.0 K 2024-11-27T13:24:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 509ms, sequenceid=317, compaction requested=false 2024-11-27T13:24:25,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:25,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:25,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:25,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a14f9675fa5d4e3cbd88292270d2464c_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:25,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713925269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713925273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713925273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713925274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713925274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742054_1230 (size=12454) 2024-11-27T13:24:25,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:25,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713925375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713925378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713925380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713925380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713925380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,438 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:25,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713925578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713925580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713925583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713925583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713925584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:25,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,686 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:25,691 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a14f9675fa5d4e3cbd88292270d2464c_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a14f9675fa5d4e3cbd88292270d2464c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:25,692 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a648015ce3e346f9871b106443295659, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:25,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a648015ce3e346f9871b106443295659 is 175, key is test_row_0/A:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:25,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742055_1231 (size=31255) 2024-11-27T13:24:25,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:25,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713925882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713925886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713925886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713925887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713925890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:25,897 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:25,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:25,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:25,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,050 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:26,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,098 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a648015ce3e346f9871b106443295659 2024-11-27T13:24:26,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ee476caaac84474f85a55c266a9753f5 is 50, key is test_row_0/B:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:26,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742056_1232 (size=12301) 2024-11-27T13:24:26,119 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ee476caaac84474f85a55c266a9753f5 2024-11-27T13:24:26,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/e2e3284cec174422b258449356e1855e is 50, key is test_row_0/C:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:26,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742057_1233 (size=12301) 2024-11-27T13:24:26,204 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 
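The Mutate calls above are being rejected with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit while the flush is still in progress. Below is a minimal client-side sketch of handling that failure mode; it assumes the HBase 2.x client API, the table and row names echo the test but the retry count and backoff values are illustrative only, and the stock client already retries this exception internally, so the explicit loop is purely to show the behaviour recorded in these entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                    // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                      // server may reject with RegionTooBusyException
          break;                               // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; wait for the flush to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}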
2024-11-27T13:24:26,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:26,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:26,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
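The entries above show the master repeatedly re-dispatching the flush procedure (pid=64) while the region server reports the region as already flushing and fails the callable with IOException. For comparison, a flush of the same table can be requested through the public Admin API; this is a hedged sketch under assumed connection setup, not the procedure framework's implementation.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the region servers to flush the table's memstores. If a flush is already
      // running (as in the log), the server-side request can fail and is retried by the
      // master's procedure machinery rather than by this caller.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}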
2024-11-27T13:24:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:26,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
as already flushing 2024-11-27T13:24:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:26,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713926387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:26,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:26,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713926389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:26,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:26,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713926393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:26,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:26,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713926394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:26,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:26,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713926396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:26,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:26,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:26,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:26,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:26,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/e2e3284cec174422b258449356e1855e 2024-11-27T13:24:26,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/a648015ce3e346f9871b106443295659 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659 2024-11-27T13:24:26,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659, entries=150, sequenceid=332, filesize=30.5 K 2024-11-27T13:24:26,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/ee476caaac84474f85a55c266a9753f5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5 2024-11-27T13:24:26,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5, entries=150, 
sequenceid=332, filesize=12.0 K 2024-11-27T13:24:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/e2e3284cec174422b258449356e1855e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e 2024-11-27T13:24:26,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e, entries=150, sequenceid=332, filesize=12.0 K 2024-11-27T13:24:26,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1318ms, sequenceid=332, compaction requested=true 2024-11-27T13:24:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:26,566 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:26,566 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:26,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:26,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:26,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:26,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:26,567 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:26,567 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:26,567 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:26,567 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,568 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:26,568 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=92.2 K 2024-11-27T13:24:26,568 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,568 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,568 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659] 2024-11-27T13:24:26,568 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/96e3e8af8cac4119b62b3cbf5a1ef5bf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.7 K 2024-11-27T13:24:26,568 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f07c70da06f946c38480b9b5f935fb65, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:26,568 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 96e3e8af8cac4119b62b3cbf5a1ef5bf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:26,569 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 480ff8d9b2574789840bb5cd296ec33c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713864003 2024-11-27T13:24:26,569 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0edb8b642324bca9872463a46e08bbf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713864003 2024-11-27T13:24:26,569 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ee476caaac84474f85a55c266a9753f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:26,569 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a648015ce3e346f9871b106443295659, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:26,576 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:26,578 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#193 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:26,578 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/7a5243608b7843daa9e5ffc4127ff734 is 50, key is test_row_0/B:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:26,580 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127dc3e3c54696c4ebfb1c5073ecc008af8_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:26,582 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127dc3e3c54696c4ebfb1c5073ecc008af8_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:26,582 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127dc3e3c54696c4ebfb1c5073ecc008af8_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:26,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742058_1234 (size=13085) 2024-11-27T13:24:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742059_1235 (size=4469) 2024-11-27T13:24:26,599 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#194 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:26,600 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7101b762a0a049ca84c4a250832191a1 is 175, key is test_row_0/A:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:26,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742060_1236 (size=32039) 2024-11-27T13:24:26,616 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/7101b762a0a049ca84c4a250832191a1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1 2024-11-27T13:24:26,621 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 7101b762a0a049ca84c4a250832191a1(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:26,621 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:26,621 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713866566; duration=0sec 2024-11-27T13:24:26,622 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:26,622 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:26,622 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:26,623 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:26,623 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:26,623 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
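Once the flush finishes, CompactSplit queues minor compactions for stores A, B, and C, and the exploring compaction policy selects all three eligible store files in each. A small sketch of requesting a compaction and polling its state through the Admin API follows; the table name is taken from the log, while the polling loop and interval are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.compact(table);  // queue a minor compaction, similar to what CompactSplit does after a flush
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);   // poll until the region servers report no compaction running
      }
    }
  }
}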
2024-11-27T13:24:26,623 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/87652677b3004779bae8af173d4301f1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.7 K 2024-11-27T13:24:26,624 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87652677b3004779bae8af173d4301f1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732713862842 2024-11-27T13:24:26,624 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a189efb0f33d4e91a622feaa60efc5fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713864003 2024-11-27T13:24:26,624 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2e3284cec174422b258449356e1855e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:26,632 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:26,632 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/5bcd58d4a8764f48971fe85778f6d1d8 is 50, key is test_row_0/C:col10/1732713865247/Put/seqid=0 2024-11-27T13:24:26,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742061_1237 (size=13051) 2024-11-27T13:24:26,653 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/5bcd58d4a8764f48971fe85778f6d1d8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/5bcd58d4a8764f48971fe85778f6d1d8 2024-11-27T13:24:26,659 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 5bcd58d4a8764f48971fe85778f6d1d8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:26,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:26,659 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=13, startTime=1732713866567; duration=0sec 2024-11-27T13:24:26,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:26,659 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:26,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:26,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-27T13:24:26,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:26,665 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:26,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:26,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112767414a52f60647b980881a1bc744f91b_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713865271/Put/seqid=0 2024-11-27T13:24:26,677 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742062_1238 (size=12454) 2024-11-27T13:24:26,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:26,685 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112767414a52f60647b980881a1bc744f91b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112767414a52f60647b980881a1bc744f91b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/172c34869b5841ff91328241d0ca5a50, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/172c34869b5841ff91328241d0ca5a50 is 175, key is test_row_0/A:col10/1732713865271/Put/seqid=0 2024-11-27T13:24:26,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742063_1239 (size=31255) 2024-11-27T13:24:26,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:26,994 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/7a5243608b7843daa9e5ffc4127ff734 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/7a5243608b7843daa9e5ffc4127ff734 2024-11-27T13:24:27,000 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 7a5243608b7843daa9e5ffc4127ff734(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:27,000 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:27,000 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713866566; duration=0sec 2024-11-27T13:24:27,001 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:27,001 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:27,091 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/172c34869b5841ff91328241d0ca5a50 2024-11-27T13:24:27,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/4dffc29fdf084e388e3cd36f850cb37b is 50, key is test_row_0/B:col10/1732713865271/Put/seqid=0 2024-11-27T13:24:27,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742064_1240 (size=12301) 2024-11-27T13:24:27,112 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/4dffc29fdf084e388e3cd36f850cb37b 2024-11-27T13:24:27,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a33a76cbf7074b689d92f0da958c6f1b is 50, key is test_row_0/C:col10/1732713865271/Put/seqid=0 2024-11-27T13:24:27,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742065_1241 (size=12301) 2024-11-27T13:24:27,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:27,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. as already flushing 2024-11-27T13:24:27,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713927401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713927403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713927403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713927404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43948 deadline: 1732713927405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713927505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713927507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713927507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713927508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,526 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a33a76cbf7074b689d92f0da958c6f1b 2024-11-27T13:24:27,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/172c34869b5841ff91328241d0ca5a50 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50 2024-11-27T13:24:27,537 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50, entries=150, sequenceid=358, filesize=30.5 K 2024-11-27T13:24:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/4dffc29fdf084e388e3cd36f850cb37b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b 2024-11-27T13:24:27,542 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b, entries=150, sequenceid=358, filesize=12.0 K 2024-11-27T13:24:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/a33a76cbf7074b689d92f0da958c6f1b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b 2024-11-27T13:24:27,547 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b, entries=150, sequenceid=358, filesize=12.0 K 2024-11-27T13:24:27,548 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 884ms, sequenceid=358, compaction requested=false 2024-11-27T13:24:27,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:27,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:27,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-27T13:24:27,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-27T13:24:27,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-27T13:24:27,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8760 sec 2024-11-27T13:24:27,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 2.8810 sec 2024-11-27T13:24:27,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:27,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:27,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:27,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112794fc246cadf4420bb60a9414f93513eb_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742066_1242 (size=14994) 2024-11-27T13:24:27,723 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:27,728 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112794fc246cadf4420bb60a9414f93513eb_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112794fc246cadf4420bb60a9414f93513eb_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:27,729 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/be652931d1884eb6865e3afb26b3591e, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:27,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/be652931d1884eb6865e3afb26b3591e is 175, key is test_row_0/A:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:27,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742067_1243 (size=39949) 2024-11-27T13:24:27,736 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/be652931d1884eb6865e3afb26b3591e 2024-11-27T13:24:27,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713927734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713927736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713927738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713927738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e1ebcc2c8edd4fdd86ab85b08435be87 is 50, key is test_row_0/B:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:27,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742068_1244 (size=12301) 2024-11-27T13:24:27,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e1ebcc2c8edd4fdd86ab85b08435be87 2024-11-27T13:24:27,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/019359ceb1ab4e278fcd16f1e84ec501 is 50, key is test_row_0/C:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:27,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742069_1245 (size=12301) 2024-11-27T13:24:27,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713927839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713927839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713927840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:27,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713927841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713928042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713928042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713928043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713928043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/019359ceb1ab4e278fcd16f1e84ec501 2024-11-27T13:24:28,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/be652931d1884eb6865e3afb26b3591e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e 2024-11-27T13:24:28,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e, entries=200, sequenceid=372, filesize=39.0 K 2024-11-27T13:24:28,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/e1ebcc2c8edd4fdd86ab85b08435be87 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87 2024-11-27T13:24:28,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87, entries=150, sequenceid=372, filesize=12.0 K 2024-11-27T13:24:28,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/019359ceb1ab4e278fcd16f1e84ec501 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501 2024-11-27T13:24:28,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501, entries=150, sequenceid=372, filesize=12.0 K 2024-11-27T13:24:28,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 483ms, sequenceid=372, compaction requested=true 2024-11-27T13:24:28,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:28,193 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:28,193 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e8c8da06d0d53cf44a4ff2a4693ab7d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:28,194 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:28,194 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/B is initiating minor compaction (all files) 2024-11-27T13:24:28,194 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:28,194 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/B in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:28,194 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/A is initiating minor compaction (all files) 2024-11-27T13:24:28,194 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/7a5243608b7843daa9e5ffc4127ff734, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.8 K 2024-11-27T13:24:28,194 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/A in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:28,194 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=100.8 K 2024-11-27T13:24:28,194 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:28,194 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e] 2024-11-27T13:24:28,195 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a5243608b7843daa9e5ffc4127ff734, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:28,195 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7101b762a0a049ca84c4a250832191a1, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:28,196 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 172c34869b5841ff91328241d0ca5a50, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732713865267 2024-11-27T13:24:28,196 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dffc29fdf084e388e3cd36f850cb37b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732713865267 2024-11-27T13:24:28,197 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ebcc2c8edd4fdd86ab85b08435be87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732713867401 2024-11-27T13:24:28,197 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting be652931d1884eb6865e3afb26b3591e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732713867401 2024-11-27T13:24:28,206 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:28,208 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#B#compaction#203 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:28,209 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/1395de3497c9458b9541a33973ee4f46 is 50, key is test_row_0/B:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:28,212 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127d574fe467f024018955323ee73a1f236_3e8c8da06d0d53cf44a4ff2a4693ab7d store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:28,216 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127d574fe467f024018955323ee73a1f236_3e8c8da06d0d53cf44a4ff2a4693ab7d, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:28,216 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127d574fe467f024018955323ee73a1f236_3e8c8da06d0d53cf44a4ff2a4693ab7d because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:28,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742070_1246 (size=13187) 2024-11-27T13:24:28,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742071_1247 (size=4469) 2024-11-27T13:24:28,241 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#A#compaction#202 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:28,242 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/e3bc3af2076a4d14aa26480352d0e284 is 175, key is test_row_0/A:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742072_1248 (size=32141) 2024-11-27T13:24:28,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:28,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713928358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713928359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713928359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713928360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c38600793d3649baa2cdf02fdc309875_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713867736/Put/seqid=0 2024-11-27T13:24:28,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742073_1249 (size=14994) 2024-11-27T13:24:28,370 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:28,373 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c38600793d3649baa2cdf02fdc309875_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c38600793d3649baa2cdf02fdc309875_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:28,375 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/9624c55b78da400891c429875da589c7, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:28,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/9624c55b78da400891c429875da589c7 is 175, key is test_row_0/A:col10/1732713867736/Put/seqid=0 2024-11-27T13:24:28,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742074_1250 (size=39949) 2024-11-27T13:24:28,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713928462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713928462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713928462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713928462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,639 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/1395de3497c9458b9541a33973ee4f46 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/1395de3497c9458b9541a33973ee4f46 2024-11-27T13:24:28,645 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/B of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 1395de3497c9458b9541a33973ee4f46(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:28,645 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:28,645 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/B, priority=13, startTime=1732713868193; duration=0sec 2024-11-27T13:24:28,645 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:28,645 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:B 2024-11-27T13:24:28,645 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:28,646 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:28,646 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 3e8c8da06d0d53cf44a4ff2a4693ab7d/C is initiating minor compaction (all files) 2024-11-27T13:24:28,646 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e8c8da06d0d53cf44a4ff2a4693ab7d/C in TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:28,646 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/5bcd58d4a8764f48971fe85778f6d1d8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp, totalSize=36.8 K 2024-11-27T13:24:28,647 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bcd58d4a8764f48971fe85778f6d1d8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713864636 2024-11-27T13:24:28,647 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a33a76cbf7074b689d92f0da958c6f1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732713865267 2024-11-27T13:24:28,648 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 019359ceb1ab4e278fcd16f1e84ec501, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732713867401 2024-11-27T13:24:28,652 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/e3bc3af2076a4d14aa26480352d0e284 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/e3bc3af2076a4d14aa26480352d0e284 2024-11-27T13:24:28,657 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/A of 3e8c8da06d0d53cf44a4ff2a4693ab7d into e3bc3af2076a4d14aa26480352d0e284(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:28,657 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:28,657 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/A, priority=13, startTime=1732713868192; duration=0sec 2024-11-27T13:24:28,658 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:28,658 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:A 2024-11-27T13:24:28,659 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e8c8da06d0d53cf44a4ff2a4693ab7d#C#compaction#205 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:28,660 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6231704f21cb4199a918ce0f47986c40 is 50, key is test_row_0/C:col10/1732713867401/Put/seqid=0 2024-11-27T13:24:28,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742075_1251 (size=13153) 2024-11-27T13:24:28,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43922 deadline: 1732713928666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43958 deadline: 1732713928666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43920 deadline: 1732713928666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:28,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43970 deadline: 1732713928667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:28,670 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/6231704f21cb4199a918ce0f47986c40 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6231704f21cb4199a918ce0f47986c40 2024-11-27T13:24:28,675 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e8c8da06d0d53cf44a4ff2a4693ab7d/C of 3e8c8da06d0d53cf44a4ff2a4693ab7d into 6231704f21cb4199a918ce0f47986c40(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:28,675 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:28,675 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d., storeName=3e8c8da06d0d53cf44a4ff2a4693ab7d/C, priority=13, startTime=1732713868193; duration=0sec 2024-11-27T13:24:28,675 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:28,675 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e8c8da06d0d53cf44a4ff2a4693ab7d:C 2024-11-27T13:24:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-27T13:24:28,778 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-27T13:24:28,779 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:28,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-11-27T13:24:28,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:28,781 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:28,782 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:28,782 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/9624c55b78da400891c429875da589c7 2024-11-27T13:24:28,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:28,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2f8a4f8a0db140aab25900baf24560ec is 50, key is test_row_0/B:col10/1732713867736/Put/seqid=0 2024-11-27T13:24:28,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742076_1252 (size=12301) 2024-11-27T13:24:28,809 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2f8a4f8a0db140aab25900baf24560ec 2024-11-27T13:24:28,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/0ccf679446424f1ba596dd3e9a1d3f0d is 50, key is test_row_0/C:col10/1732713867736/Put/seqid=0 2024-11-27T13:24:28,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742077_1253 (size=12301) 2024-11-27T13:24:28,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/0ccf679446424f1ba596dd3e9a1d3f0d 2024-11-27T13:24:28,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/9624c55b78da400891c429875da589c7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/9624c55b78da400891c429875da589c7 2024-11-27T13:24:28,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/9624c55b78da400891c429875da589c7, entries=200, sequenceid=397, filesize=39.0 K 2024-11-27T13:24:28,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/2f8a4f8a0db140aab25900baf24560ec as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2f8a4f8a0db140aab25900baf24560ec 2024-11-27T13:24:28,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2f8a4f8a0db140aab25900baf24560ec, entries=150, sequenceid=397, filesize=12.0 K 2024-11-27T13:24:28,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/0ccf679446424f1ba596dd3e9a1d3f0d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/0ccf679446424f1ba596dd3e9a1d3f0d 2024-11-27T13:24:28,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/0ccf679446424f1ba596dd3e9a1d3f0d, entries=150, sequenceid=397, filesize=12.0 K 2024-11-27T13:24:28,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 511ms, sequenceid=397, compaction requested=false 2024-11-27T13:24:28,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:28,905 DEBUG [Thread-708 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:59011 2024-11-27T13:24:28,905 DEBUG [Thread-708 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,905 DEBUG [Thread-704 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:59011 2024-11-27T13:24:28,905 DEBUG [Thread-704 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,907 DEBUG [Thread-702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:59011 2024-11-27T13:24:28,907 DEBUG [Thread-702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,907 DEBUG [Thread-706 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:59011 2024-11-27T13:24:28,907 DEBUG [Thread-706 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:28,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-27T13:24:28,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:28,935 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:28,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112732d5025171a24c1682052d9044e0cd2a_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713868358/Put/seqid=0 2024-11-27T13:24:28,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742078_1254 (size=12454) 2024-11-27T13:24:28,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:28,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
as already flushing 2024-11-27T13:24:28,972 DEBUG [Thread-697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:59011 2024-11-27T13:24:28,972 DEBUG [Thread-697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,972 DEBUG [Thread-693 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:59011 2024-11-27T13:24:28,972 DEBUG [Thread-693 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,973 DEBUG [Thread-699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:59011 2024-11-27T13:24:28,973 DEBUG [Thread-699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:28,974 DEBUG [Thread-695 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:59011 2024-11-27T13:24:28,974 DEBUG [Thread-695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:29,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:29,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:29,350 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112732d5025171a24c1682052d9044e0cd2a_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112732d5025171a24c1682052d9044e0cd2a_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:29,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/623a9a42f2894066ae24e36ea02a063d, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:29,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/623a9a42f2894066ae24e36ea02a063d is 175, key is test_row_0/A:col10/1732713868358/Put/seqid=0 2024-11-27T13:24:29,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742079_1255 (size=31255) 2024-11-27T13:24:29,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:29,416 DEBUG [Thread-691 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7362d978 to 127.0.0.1:59011 2024-11-27T13:24:29,416 DEBUG [Thread-691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:29,756 INFO 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=412, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/623a9a42f2894066ae24e36ea02a063d 2024-11-27T13:24:29,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/75b519b7e5574b08a7fe006edc62f95f is 50, key is test_row_0/B:col10/1732713868358/Put/seqid=0 2024-11-27T13:24:29,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742080_1256 (size=12301) 2024-11-27T13:24:29,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:30,169 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/75b519b7e5574b08a7fe006edc62f95f 2024-11-27T13:24:30,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/860a8226022a439d91a2e1cf4eb6f354 is 50, key is test_row_0/C:col10/1732713868358/Put/seqid=0 2024-11-27T13:24:30,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742081_1257 (size=12301) 2024-11-27T13:24:30,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/860a8226022a439d91a2e1cf4eb6f354 2024-11-27T13:24:30,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/623a9a42f2894066ae24e36ea02a063d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/623a9a42f2894066ae24e36ea02a063d 2024-11-27T13:24:30,590 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/623a9a42f2894066ae24e36ea02a063d, entries=150, sequenceid=412, filesize=30.5 K 
2024-11-27T13:24:30,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/75b519b7e5574b08a7fe006edc62f95f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/75b519b7e5574b08a7fe006edc62f95f 2024-11-27T13:24:30,594 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/75b519b7e5574b08a7fe006edc62f95f, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:24:30,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/860a8226022a439d91a2e1cf4eb6f354 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/860a8226022a439d91a2e1cf4eb6f354 2024-11-27T13:24:30,599 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/860a8226022a439d91a2e1cf4eb6f354, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:24:30,599 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=33.54 KB/34350 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1664ms, sequenceid=412, compaction requested=true 2024-11-27T13:24:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-11-27T13:24:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-11-27T13:24:30,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-27T13:24:30,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8190 sec 2024-11-27T13:24:30,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.8230 sec 2024-11-27T13:24:30,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-27T13:24:30,885 INFO [Thread-701 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6628 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6206 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2682 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8046 rows 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2690 2024-11-27T13:24:30,885 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8070 rows 2024-11-27T13:24:30,885 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:24:30,885 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04506927 to 127.0.0.1:59011 2024-11-27T13:24:30,885 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:30,888 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:24:30,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:24:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:30,891 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:30,893 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713870892"}]},"ts":"1732713870892"} 2024-11-27T13:24:30,894 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:24:30,896 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:24:30,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:24:30,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, UNASSIGN}] 2024-11-27T13:24:30,899 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, UNASSIGN 2024-11-27T13:24:30,899 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:30,900 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:24:30,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; CloseRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:30,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:31,051 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:31,052 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(124): Close 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:31,052 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T13:24:31,052 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:24:31,052 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1681): Closing 3e8c8da06d0d53cf44a4ff2a4693ab7d, disabling compactions & flushes 2024-11-27T13:24:31,052 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 
2024-11-27T13:24:31,052 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:31,052 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. after waiting 0 ms 2024-11-27T13:24:31,052 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:31,052 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(2837): Flushing 3e8c8da06d0d53cf44a4ff2a4693ab7d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=A 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=B 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e8c8da06d0d53cf44a4ff2a4693ab7d, store=C 2024-11-27T13:24:31,053 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:31,059 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112706d77eb2442a4f5cbe60d3a9acadcc02_3e8c8da06d0d53cf44a4ff2a4693ab7d is 50, key is test_row_0/A:col10/1732713869415/Put/seqid=0 2024-11-27T13:24:31,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742082_1258 (size=12454) 2024-11-27T13:24:31,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:31,466 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:31,470 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112706d77eb2442a4f5cbe60d3a9acadcc02_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112706d77eb2442a4f5cbe60d3a9acadcc02_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:31,471 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/305136d268b444e3b43387058d0d90ba, store: [table=TestAcidGuarantees family=A region=3e8c8da06d0d53cf44a4ff2a4693ab7d] 2024-11-27T13:24:31,471 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/305136d268b444e3b43387058d0d90ba is 175, key is test_row_0/A:col10/1732713869415/Put/seqid=0 2024-11-27T13:24:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742083_1259 (size=31255) 2024-11-27T13:24:31,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:31,876 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=420, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/305136d268b444e3b43387058d0d90ba 2024-11-27T13:24:31,883 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a56b8cb4f0804bda86935527c6f9a968 is 50, key is test_row_0/B:col10/1732713869415/Put/seqid=0 2024-11-27T13:24:31,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742084_1260 (size=12301) 2024-11-27T13:24:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:32,287 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a56b8cb4f0804bda86935527c6f9a968 2024-11-27T13:24:32,294 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/48210d4d0d88491a9a39a84e17461666 is 50, key is test_row_0/C:col10/1732713869415/Put/seqid=0 2024-11-27T13:24:32,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742085_1261 (size=12301) 2024-11-27T13:24:32,698 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/48210d4d0d88491a9a39a84e17461666 2024-11-27T13:24:32,703 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/A/305136d268b444e3b43387058d0d90ba as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/305136d268b444e3b43387058d0d90ba 2024-11-27T13:24:32,706 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/305136d268b444e3b43387058d0d90ba, entries=150, sequenceid=420, filesize=30.5 K 2024-11-27T13:24:32,708 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/B/a56b8cb4f0804bda86935527c6f9a968 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a56b8cb4f0804bda86935527c6f9a968 2024-11-27T13:24:32,711 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a56b8cb4f0804bda86935527c6f9a968, entries=150, sequenceid=420, filesize=12.0 K 2024-11-27T13:24:32,712 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/.tmp/C/48210d4d0d88491a9a39a84e17461666 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/48210d4d0d88491a9a39a84e17461666 2024-11-27T13:24:32,715 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/48210d4d0d88491a9a39a84e17461666, entries=150, sequenceid=420, filesize=12.0 K 2024-11-27T13:24:32,716 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 3e8c8da06d0d53cf44a4ff2a4693ab7d in 1664ms, sequenceid=420, compaction requested=true 2024-11-27T13:24:32,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e] to archive 2024-11-27T13:24:32,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T13:24:32,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/69f8c27c2d12442489a811281ba662e9 2024-11-27T13:24:32,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/2f2e1dd0dc4d4940ac2470603b92a99c 2024-11-27T13:24:32,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/349d64cfa0d64f76b010a3e4df30982f 2024-11-27T13:24:32,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7a8a35a84ecf4fb299fbbd9fb0232f02 2024-11-27T13:24:32,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/b8e05d7040974ad0854877133bd34a36 2024-11-27T13:24:32,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1d7245475db841f0871f2b77681f09be 2024-11-27T13:24:32,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c76814b1137a40abb419b3913db23d49 2024-11-27T13:24:32,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7fe3cad95d634ef2bad8e6477d4d4136 2024-11-27T13:24:32,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/06b132a09fe64fa38857b16d5cf355d1 2024-11-27T13:24:32,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/1367fdb477d74103b112a5987d2c4350 2024-11-27T13:24:32,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/191c84358a6f4d19b83afb40746cfff7 2024-11-27T13:24:32,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/ffd8e18a0c494dbf99f81b6e9a04d3b0 2024-11-27T13:24:32,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/14a3cd94873a4031a9d687b68cda38e7 2024-11-27T13:24:32,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/73064a94fed4452981fd166e1cfe352f 2024-11-27T13:24:32,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/c09db2ae602848b895606421e412d088 2024-11-27T13:24:32,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/3af7ca8c2fcd4e6d85855e1daceeb082 2024-11-27T13:24:32,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/98fe953c80aa4170b3f8bae096dceff5 2024-11-27T13:24:32,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a1cd84aa6ae14ff3818ef8036ff2056c 2024-11-27T13:24:32,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/636ba085d31a4791becc3cde674e73c5 2024-11-27T13:24:32,738 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/15bfc3e72548405b8e031f26e5132634 2024-11-27T13:24:32,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/f07c70da06f946c38480b9b5f935fb65 2024-11-27T13:24:32,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/d0edb8b642324bca9872463a46e08bbf 2024-11-27T13:24:32,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/7101b762a0a049ca84c4a250832191a1 2024-11-27T13:24:32,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/a648015ce3e346f9871b106443295659 2024-11-27T13:24:32,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/172c34869b5841ff91328241d0ca5a50 2024-11-27T13:24:32,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/be652931d1884eb6865e3afb26b3591e 2024-11-27T13:24:32,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/abed73b3a75e4b7987866089fab30a56, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/c80377eb1f174fbd8e21ffd91966cf8d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d2e09f2887ab47899f90620874639413, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d82f99f92e094f6e866870c8e5b48545, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/64db691230124403b68d3a825e64b766, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/96e3e8af8cac4119b62b3cbf5a1ef5bf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/7a5243608b7843daa9e5ffc4127ff734, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87] to archive 2024-11-27T13:24:32,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T13:24:32,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2aa4f7d3abda48288d626a412e328e22 2024-11-27T13:24:32,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/88bfcdfed8304f18a4f3441bb0564a49 2024-11-27T13:24:32,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/abed73b3a75e4b7987866089fab30a56 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/abed73b3a75e4b7987866089fab30a56 2024-11-27T13:24:32,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2e6c7dd8eb3c4875b0a053d9d824d2ea 2024-11-27T13:24:32,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ef4185db13a84447b7a4bbabc04e0bee 2024-11-27T13:24:32,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d33e028000bf4af298bf43f6a5dc4864 2024-11-27T13:24:32,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/c80377eb1f174fbd8e21ffd91966cf8d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/c80377eb1f174fbd8e21ffd91966cf8d 2024-11-27T13:24:32,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/243abb24ac234f8180d2a14e6139142d 2024-11-27T13:24:32,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2af1977a522a4c23b55575ef4c894ba4 2024-11-27T13:24:32,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d2e09f2887ab47899f90620874639413 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d2e09f2887ab47899f90620874639413 2024-11-27T13:24:32,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3e8e82b435c94292ae3bd6accc21e068 2024-11-27T13:24:32,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/3abb7bdc2efa421583704435d1d21f9b 2024-11-27T13:24:32,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e8b17ca0f9104a5a8c0cc87315d7552b 2024-11-27T13:24:32,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d82f99f92e094f6e866870c8e5b48545 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/d82f99f92e094f6e866870c8e5b48545 2024-11-27T13:24:32,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/5459a312736f4fc0948b96881aee9357 2024-11-27T13:24:32,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/11ad3e2d258545f3950a22b8b1ef5538 2024-11-27T13:24:32,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/64db691230124403b68d3a825e64b766 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/64db691230124403b68d3a825e64b766 2024-11-27T13:24:32,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/45a1ac3df3684176ab8b02115234101f 2024-11-27T13:24:32,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/f269fe72bbbf4045957320378f88da5e 2024-11-27T13:24:32,765 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/96e3e8af8cac4119b62b3cbf5a1ef5bf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/96e3e8af8cac4119b62b3cbf5a1ef5bf 2024-11-27T13:24:32,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a54b14b4589449158b7ee2d36ac5794c 2024-11-27T13:24:32,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/480ff8d9b2574789840bb5cd296ec33c 2024-11-27T13:24:32,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/7a5243608b7843daa9e5ffc4127ff734 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/7a5243608b7843daa9e5ffc4127ff734 2024-11-27T13:24:32,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/ee476caaac84474f85a55c266a9753f5 2024-11-27T13:24:32,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/4dffc29fdf084e388e3cd36f850cb37b 2024-11-27T13:24:32,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/e1ebcc2c8edd4fdd86ab85b08435be87 2024-11-27T13:24:32,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3eb000482946438c95778fb1d27d2ce5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3c1635ac363a44baac1be33b4066edd7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1f84a0ad2b994d6ea7a27e98c6242013, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6048aeb67e1a4ec290c1f707b89b5fea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/87652677b3004779bae8af173d4301f1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/5bcd58d4a8764f48971fe85778f6d1d8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501] to archive 2024-11-27T13:24:32,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T13:24:32,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/127c146cd3ce4b1fb7a2d06b2f984531 2024-11-27T13:24:32,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/9b18f70e385c4ca588bbaacd66f2acdb 2024-11-27T13:24:32,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3eb000482946438c95778fb1d27d2ce5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3eb000482946438c95778fb1d27d2ce5 2024-11-27T13:24:32,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1a3ebf3dc1924f15ab257b2a5c39c948 2024-11-27T13:24:32,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6d7378fa42b445c183d42774efe083c3 2024-11-27T13:24:32,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/8fc7d09392104bb5992a94f385eb2133 2024-11-27T13:24:32,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3c1635ac363a44baac1be33b4066edd7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/3c1635ac363a44baac1be33b4066edd7 2024-11-27T13:24:32,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d947605031944860a7277fa4edd178e6 2024-11-27T13:24:32,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/469bbc9eb26747909bf48e9064ed5211 2024-11-27T13:24:32,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1f84a0ad2b994d6ea7a27e98c6242013 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/1f84a0ad2b994d6ea7a27e98c6242013 2024-11-27T13:24:32,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/cbba95b4008946e48d5412ae74763e96 2024-11-27T13:24:32,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/65e1873866af4a4392ff260cf2636ef2 2024-11-27T13:24:32,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/86fc01d1e5f24c6eb4b6e34de347853d 2024-11-27T13:24:32,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/bc680f33f71343b980d3fabff0724c4d 2024-11-27T13:24:32,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6048aeb67e1a4ec290c1f707b89b5fea to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6048aeb67e1a4ec290c1f707b89b5fea 2024-11-27T13:24:32,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/d5206f539a8345cbb74eaa5335c749d8 2024-11-27T13:24:32,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/2b1b9ce8a4544e1d8892553697540f65 2024-11-27T13:24:32,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/df5ea92bd71a47a5b0a62df99d93c555 2024-11-27T13:24:32,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/87652677b3004779bae8af173d4301f1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/87652677b3004779bae8af173d4301f1 2024-11-27T13:24:32,795 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/524f628d0a05472f95e9b97cafa35e78 2024-11-27T13:24:32,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a189efb0f33d4e91a622feaa60efc5fc 2024-11-27T13:24:32,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/5bcd58d4a8764f48971fe85778f6d1d8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/5bcd58d4a8764f48971fe85778f6d1d8 2024-11-27T13:24:32,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/e2e3284cec174422b258449356e1855e 2024-11-27T13:24:32,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/a33a76cbf7074b689d92f0da958c6f1b 2024-11-27T13:24:32,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/019359ceb1ab4e278fcd16f1e84ec501 2024-11-27T13:24:32,804 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits/423.seqid, newMaxSeqId=423, maxSeqId=4 2024-11-27T13:24:32,805 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d. 2024-11-27T13:24:32,805 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1635): Region close journal for 3e8c8da06d0d53cf44a4ff2a4693ab7d: 2024-11-27T13:24:32,806 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(170): Closed 3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:32,807 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=3e8c8da06d0d53cf44a4ff2a4693ab7d, regionState=CLOSED 2024-11-27T13:24:32,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-27T13:24:32,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseRegionProcedure 3e8c8da06d0d53cf44a4ff2a4693ab7d, server=a0541979a851,32819,1732713812705 in 1.9080 sec 2024-11-27T13:24:32,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-27T13:24:32,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e8c8da06d0d53cf44a4ff2a4693ab7d, UNASSIGN in 1.9110 sec 2024-11-27T13:24:32,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-27T13:24:32,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9130 sec 2024-11-27T13:24:32,812 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713872812"}]},"ts":"1732713872812"} 2024-11-27T13:24:32,813 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:24:32,815 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:24:32,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9280 sec 2024-11-27T13:24:32,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-27T13:24:32,996 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-27T13:24:32,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:24:32,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:32,998 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T13:24:32,998 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=71, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:33,000 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,002 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits] 2024-11-27T13:24:33,005 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/305136d268b444e3b43387058d0d90ba to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/305136d268b444e3b43387058d0d90ba 2024-11-27T13:24:33,007 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/623a9a42f2894066ae24e36ea02a063d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/623a9a42f2894066ae24e36ea02a063d 2024-11-27T13:24:33,008 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/9624c55b78da400891c429875da589c7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/9624c55b78da400891c429875da589c7 2024-11-27T13:24:33,009 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/e3bc3af2076a4d14aa26480352d0e284 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/A/e3bc3af2076a4d14aa26480352d0e284 2024-11-27T13:24:33,011 DEBUG [HFileArchiver-2 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/1395de3497c9458b9541a33973ee4f46 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/1395de3497c9458b9541a33973ee4f46 2024-11-27T13:24:33,012 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2f8a4f8a0db140aab25900baf24560ec to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/2f8a4f8a0db140aab25900baf24560ec 2024-11-27T13:24:33,014 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/75b519b7e5574b08a7fe006edc62f95f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/75b519b7e5574b08a7fe006edc62f95f 2024-11-27T13:24:33,015 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a56b8cb4f0804bda86935527c6f9a968 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/B/a56b8cb4f0804bda86935527c6f9a968 2024-11-27T13:24:33,017 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/0ccf679446424f1ba596dd3e9a1d3f0d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/0ccf679446424f1ba596dd3e9a1d3f0d 2024-11-27T13:24:33,018 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/48210d4d0d88491a9a39a84e17461666 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/48210d4d0d88491a9a39a84e17461666 2024-11-27T13:24:33,019 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6231704f21cb4199a918ce0f47986c40 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/6231704f21cb4199a918ce0f47986c40 2024-11-27T13:24:33,020 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/860a8226022a439d91a2e1cf4eb6f354 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/C/860a8226022a439d91a2e1cf4eb6f354 2024-11-27T13:24:33,023 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits/423.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d/recovered.edits/423.seqid 2024-11-27T13:24:33,023 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,023 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:24:33,024 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:24:33,025 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T13:24:33,028 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112706d77eb2442a4f5cbe60d3a9acadcc02_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112706d77eb2442a4f5cbe60d3a9acadcc02_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,029 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270796924298e940e7bae95a7a01fc8038_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270796924298e940e7bae95a7a01fc8038_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,030 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270c00ee67634948379ab36c50dd8cc70d_3e8c8da06d0d53cf44a4ff2a4693ab7d to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270c00ee67634948379ab36c50dd8cc70d_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,031 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112732d5025171a24c1682052d9044e0cd2a_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112732d5025171a24c1682052d9044e0cd2a_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,032 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273acbff049ee24a84844ee24c6dfe16f7_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411273acbff049ee24a84844ee24c6dfe16f7_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,034 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742354e9b12774f7abdabf97ed9324411_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742354e9b12774f7abdabf97ed9324411_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,035 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112767414a52f60647b980881a1bc744f91b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112767414a52f60647b980881a1bc744f91b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,036 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127692fc6e379b74f09bef38a8ba9c67b7f_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127692fc6e379b74f09bef38a8ba9c67b7f_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,037 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112770e59a6370e94ea999eef48c36112b2b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112770e59a6370e94ea999eef48c36112b2b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,038 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411277182e1b98bd24a8a9b90c8196ef8f2a3_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411277182e1b98bd24a8a9b90c8196ef8f2a3_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,039 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278637e169ca9a46068058492be36fb85c_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278637e169ca9a46068058492be36fb85c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,040 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112794fc246cadf4420bb60a9414f93513eb_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112794fc246cadf4420bb60a9414f93513eb_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,041 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112795f860b9af10482982ba32134b741f7b_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112795f860b9af10482982ba32134b741f7b_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,042 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a14f9675fa5d4e3cbd88292270d2464c_3e8c8da06d0d53cf44a4ff2a4693ab7d to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a14f9675fa5d4e3cbd88292270d2464c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,043 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a347d4b9e56c4bb584bd36ff73d3be7e_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a347d4b9e56c4bb584bd36ff73d3be7e_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,044 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a7af6ea882404cad80481537666f9f53_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a7af6ea882404cad80481537666f9f53_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,045 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b007ec6d810e43468bf3f0d329653d89_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b007ec6d810e43468bf3f0d329653d89_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,046 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b9367fbc6d7e4c5185f56260cc674b46_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127b9367fbc6d7e4c5185f56260cc674b46_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,047 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c20dc189b3dd41c7a33dbf45fe75ccea_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c20dc189b3dd41c7a33dbf45fe75ccea_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,048 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c38600793d3649baa2cdf02fdc309875_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c38600793d3649baa2cdf02fdc309875_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,049 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c86e85ab8232424f81b475ab085da415_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c86e85ab8232424f81b475ab085da415_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,050 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e68732296cf44de19154b6a645afee1c_3e8c8da06d0d53cf44a4ff2a4693ab7d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e68732296cf44de19154b6a645afee1c_3e8c8da06d0d53cf44a4ff2a4693ab7d 2024-11-27T13:24:33,051 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:24:33,053 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=71, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:33,056 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:24:33,058 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T13:24:33,058 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=71, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:33,058 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
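The archive-and-delete sequence above is the master-side DeleteTableProcedure (pid=71) doing its work; from a client it is normally just a disable/delete pair on the Admin API. A minimal sketch of those calls, assuming a reachable cluster configured through hbase-site.xml (the class name and connection setup are illustrative, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(tn)) {
                    admin.disableTable(tn);   // a table must be disabled before it can be deleted
                }
                admin.deleteTable(tn);        // master runs a DeleteTableProcedure: HFiles are archived,
                                              // meta rows and the descriptor are removed, as logged above
            }
        }
    }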
2024-11-27T13:24:33,059 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713873058"}]},"ts":"9223372036854775807"} 2024-11-27T13:24:33,060 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:24:33,060 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3e8c8da06d0d53cf44a4ff2a4693ab7d, NAME => 'TestAcidGuarantees,,1732713845772.3e8c8da06d0d53cf44a4ff2a4693ab7d.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:24:33,060 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T13:24:33,061 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713873060"}]},"ts":"9223372036854775807"} 2024-11-27T13:24:33,062 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:24:33,065 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=71, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:33,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 69 msec 2024-11-27T13:24:33,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-27T13:24:33,099 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-27T13:24:33,110 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 239) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_374866773_22 at /127.0.0.1:50776 [Waiting for operation #562] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_374866773_22 at /127.0.0.1:50786 [Waiting for operation #577] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1987755983_22 at /127.0.0.1:43958 [Waiting for operation #658] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1987755983_22 at /127.0.0.1:43890 [Waiting for operation #697] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=465 (was 456) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=478 (was 451) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4200 (was 4341) 2024-11-27T13:24:33,120 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=478, ProcessCount=11, AvailableMemoryMB=4200 2024-11-27T13:24:33,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
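The TableDescriptorChecker warning above fires because the memstore flush size in play is 131072 bytes (128 KB), far below the usual 128 MB default, which leads to very frequent flushes. The value can come either from the table descriptor (MEMSTORE_FLUSHSIZE) or from hbase.hregion.memstore.flush.size in the configuration. A short sketch of both knobs (illustrative only; the log does not show which path this test uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSize {
        public static void main(String[] args) {
            // Per-table: stored in the descriptor as MEMSTORE_FLUSHSIZE.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setMemStoreFlushSize(128 * 1024L)   // 131072 bytes, the value in the warning
                .build();

            // Cluster-wide: the equivalent configuration property.
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

            System.out.println(td.getMemStoreFlushSize());                              // 131072
            System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1L)); // 131072
        }
    }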
2024-11-27T13:24:33,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:24:33,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:33,123 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:24:33,123 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:33,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 72 2024-11-27T13:24:33,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:33,124 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:24:33,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742086_1262 (size=963) 2024-11-27T13:24:33,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:33,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:33,532 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:24:33,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742087_1263 (size=53) 2024-11-27T13:24:33,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:33,939 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:24:33,939 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 160ba87e97489a540350dc572e5f397d, disabling compactions & flushes 2024-11-27T13:24:33,939 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:33,939 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:33,939 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. after waiting 0 ms 2024-11-27T13:24:33,940 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:33,940 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
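The create request and the region descriptor above give the full schema: the table-level metadata key hbase.hregion.compacting.memstore.type is set to ADAPTIVE, and three column families A, B and C are defined with VERSIONS => 1, ROW bloom filters and 64 KB blocks. A rough sketch of assembling and submitting an equivalent descriptor with the client API (class name and connection handling are illustrative, not taken from the test source):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
        public static void main(String[] args) throws Exception {
            TableDescriptorBuilder tdb = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata seen in the log: adaptive in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)                 // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                    .setBlocksize(64 * 1024)           // BLOCKSIZE => 64 KB
                    .build());
            }
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(tdb.build());        // drives a CreateTableProcedure like pid=72 above
            }
        }
    }

createTable() blocks until the procedure finishes; createTableAsync() would return a future that the caller polls instead, which is what produces the repeated "Checking to see if procedure is done" entries in the log.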
2024-11-27T13:24:33,940 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:33,941 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:24:33,941 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713873941"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713873941"}]},"ts":"1732713873941"} 2024-11-27T13:24:33,942 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:24:33,943 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:24:33,943 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713873943"}]},"ts":"1732713873943"} 2024-11-27T13:24:33,944 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:24:33,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, ASSIGN}] 2024-11-27T13:24:33,951 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, ASSIGN 2024-11-27T13:24:33,952 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:24:34,103 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=160ba87e97489a540350dc572e5f397d, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:34,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; OpenRegionProcedure 160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:34,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:34,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:34,258 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:34,258 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7285): Opening region: {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:24:34,259 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,259 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:24:34,259 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7327): checking encryption for 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,259 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7330): checking classloading for 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,260 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,261 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:34,262 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 160ba87e97489a540350dc572e5f397d columnFamilyName A 2024-11-27T13:24:34,262 DEBUG [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:34,262 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(327): Store=160ba87e97489a540350dc572e5f397d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:34,262 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,263 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:34,263 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 160ba87e97489a540350dc572e5f397d columnFamilyName B 2024-11-27T13:24:34,263 DEBUG [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:34,264 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(327): Store=160ba87e97489a540350dc572e5f397d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:34,264 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,265 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:24:34,265 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 160ba87e97489a540350dc572e5f397d columnFamilyName C 2024-11-27T13:24:34,265 DEBUG [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:24:34,266 INFO [StoreOpener-160ba87e97489a540350dc572e5f397d-1 {}] regionserver.HStore(327): Store=160ba87e97489a540350dc572e5f397d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:24:34,266 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:34,266 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,267 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,268 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:24:34,269 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1085): writing seq id for 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:34,271 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:24:34,271 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1102): Opened 160ba87e97489a540350dc572e5f397d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67742116, jitterRate=0.009436190128326416}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:24:34,272 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1001): Region open journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:34,272 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., pid=74, masterSystemTime=1732713874255 2024-11-27T13:24:34,274 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:34,274 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
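Each of the three stores above comes up as a CompactingMemStore with compactor=ADAPTIVE because of the table-level metadata key shown earlier; the same in-memory compaction policy can also be requested per column family. A small illustrative sketch (not from the test source):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamily {
        public static void main(String[] args) {
            // Per-family variant of the policy the log shows being applied table-wide.
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build();
            System.out.println(cf.getInMemoryCompaction());   // ADAPTIVE
        }
    }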
2024-11-27T13:24:34,274 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=160ba87e97489a540350dc572e5f397d, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:34,276 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-27T13:24:34,276 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; OpenRegionProcedure 160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 in 171 msec 2024-11-27T13:24:34,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=72 2024-11-27T13:24:34,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=72, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, ASSIGN in 326 msec 2024-11-27T13:24:34,278 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:24:34,278 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713874278"}]},"ts":"1732713874278"} 2024-11-27T13:24:34,279 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:24:34,282 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:24:34,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1600 sec 2024-11-27T13:24:35,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-27T13:24:35,228 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 72 completed 2024-11-27T13:24:35,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b6adc5 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a569490 2024-11-27T13:24:35,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1ac389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,235 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,236 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36512, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,237 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:24:35,238 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58370, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:24:35,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x669e1999 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6862e3ce 2024-11-27T13:24:35,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e73c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,244 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72aa9ee5 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d296fed 2024-11-27T13:24:35,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c480dfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,248 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-11-27T13:24:35,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,251 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-11-27T13:24:35,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-11-27T13:24:35,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-11-27T13:24:35,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,262 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-11-27T13:24:35,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,265 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-11-27T13:24:35,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,268 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-11-27T13:24:35,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-11-27T13:24:35,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:24:35,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:35,277 DEBUG [hconnection-0x2acaf34f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,277 DEBUG [hconnection-0x1a57398b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,277 DEBUG [hconnection-0x3f9b203a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-27T13:24:35,278 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,278 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36522, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,279 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:35,279 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:35,280 DEBUG [hconnection-0x221c9095-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,280 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:35,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:35,281 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,281 DEBUG [hconnection-0x16ded5f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,282 DEBUG [hconnection-0x32afd8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,282 DEBUG [hconnection-0x542dc3ca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,282 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,283 DEBUG [hconnection-0x6695923f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,283 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,283 DEBUG [hconnection-0x3ea24f11-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,283 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,284 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-27T13:24:35,284 DEBUG [hconnection-0x1e5ec527-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:24:35,284 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,285 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:24:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:35,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:35,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:35,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/ff8ee2d22c51461b826a2a6a5eb5e815 is 50, key is test_row_0/A:col10/1732713875291/Put/seqid=0 2024-11-27T13:24:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713935318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713935318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713935319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713935319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713935319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:35,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742088_1264 (size=12001) 2024-11-27T13:24:35,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713935423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713935423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713935424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713935424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713935424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:35,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:35,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
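The FlushTableProcedure (pid=75) and its FlushRegionProcedure child (pid=76) seen above are what the master runs when a client asks for a table flush. The test harness itself is not shown in this log, so the following is only a minimal sketch, assuming a hypothetical standalone client with hbase-site.xml on the classpath, of how such a flush request is typically issued through the public Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath (assumed setup)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the master this
      // surfaces as a FlushTableProcedure with FlushRegionProcedure children,
      // matching the pid=75 / pid=76 entries in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}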
2024-11-27T13:24:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:35,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:35,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:35,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:35,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713935625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713935625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713935626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713935627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713935628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:35,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:35,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:35,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
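The repeated "Over memstore limit=512.0 K" rejections above come from the region's blocking memstore size, which in stock HBase is derived from the per-region flush size multiplied by a block multiplier. The exact values this test configured are not visible in the log, so the snippet below is only an illustrative sketch of the two relevant settings, with assumed numbers:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes to a region are blocked once its memstore reaches roughly
    // flush.size * block.multiplier; a 512 K blocking limit implies a deliberately
    // tiny flush size for this test. The values below are assumptions, not the
    // settings actually used by the run in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking memstore size = " + blocking + " bytes");
  }
}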
2024-11-27T13:24:35,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/ff8ee2d22c51461b826a2a6a5eb5e815 2024-11-27T13:24:35,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/8dd0e46b42a149049f8670c7cb317eef is 50, key is test_row_0/B:col10/1732713875291/Put/seqid=0 2024-11-27T13:24:35,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742089_1265 (size=12001) 2024-11-27T13:24:35,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/8dd0e46b42a149049f8670c7cb317eef 2024-11-27T13:24:35,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 is 50, key is test_row_0/C:col10/1732713875291/Put/seqid=0 2024-11-27T13:24:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742090_1266 (size=12001) 2024-11-27T13:24:35,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:35,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:35,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:35,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:35,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:35,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
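While the flush above catches up, the same client connections keep reappearing with increasing callIds, which is consistent with the HBase client retrying the rejected Mutate calls on its own. As a rough illustration only, and assuming a hypothetical writer with made-up retry settings and payload (the row key, family "A", and qualifier "col10" are taken from the log, everything else is an assumption), a bounded-retry put might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // RegionTooBusyException is retried inside the client; these two settings bound
    // how long a single put keeps retrying (illustrative values, not the test's own).
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 100);   // ms between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is still over its blocking memstore size after all retries,
      // a retries-exhausted exception propagates to the caller.
      table.put(put);
    }
  }
}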
2024-11-27T13:24:35,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:35,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713935928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713935929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713935929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713935931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:35,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:35,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713935932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:36,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:36,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:36,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:36,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:36,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:36,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:36,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:36,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 2024-11-27T13:24:36,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/ff8ee2d22c51461b826a2a6a5eb5e815 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815 2024-11-27T13:24:36,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T13:24:36,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/8dd0e46b42a149049f8670c7cb317eef as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef 2024-11-27T13:24:36,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-27T13:24:36,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 2024-11-27T13:24:36,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1, entries=150, sequenceid=15, filesize=11.7 K 2024-11-27T13:24:36,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 160ba87e97489a540350dc572e5f397d in 1015ms, sequenceid=15, compaction requested=false 2024-11-27T13:24:36,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:36,345 DEBUG [master/a0541979a851:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3966f1275e7e6d5ced325aca1684d4b9 changed from -1.0 to 0.0, refreshing cache 2024-11-27T13:24:36,352 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:36,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-27T13:24:36,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:36,353 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:36,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:36,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/24b37896175a4a5694786c894fb4f2df is 50, key is test_row_0/A:col10/1732713875313/Put/seqid=0 2024-11-27T13:24:36,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742091_1267 (size=12001) 2024-11-27T13:24:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:36,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:36,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713936439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713936440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713936440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713936441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713936441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713936543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713936543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713936544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713936544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713936544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713936745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713936745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713936746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713936747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713936747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:36,765 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/24b37896175a4a5694786c894fb4f2df 2024-11-27T13:24:36,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c6617569dd4d49ca919d876c31b7ed8f is 50, key is test_row_0/B:col10/1732713875313/Put/seqid=0 2024-11-27T13:24:36,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742092_1268 (size=12001) 2024-11-27T13:24:37,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713937048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713937049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713937050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713937050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713937052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,179 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c6617569dd4d49ca919d876c31b7ed8f 2024-11-27T13:24:37,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b is 50, key is test_row_0/C:col10/1732713875313/Put/seqid=0 2024-11-27T13:24:37,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742093_1269 (size=12001) 2024-11-27T13:24:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:37,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713937553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713937554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713937555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713937556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713937557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:37,593 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b 2024-11-27T13:24:37,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/24b37896175a4a5694786c894fb4f2df as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df 2024-11-27T13:24:37,603 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:24:37,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c6617569dd4d49ca919d876c31b7ed8f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f 2024-11-27T13:24:37,608 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:24:37,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b 2024-11-27T13:24:37,613 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b, entries=150, sequenceid=38, filesize=11.7 K 2024-11-27T13:24:37,614 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 160ba87e97489a540350dc572e5f397d in 1261ms, sequenceid=38, compaction requested=false 2024-11-27T13:24:37,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:37,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:37,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-27T13:24:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-27T13:24:37,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-27T13:24:37,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3360 sec 2024-11-27T13:24:37,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.3430 sec 2024-11-27T13:24:38,331 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:24:38,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:38,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:24:38,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:38,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:38,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:38,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-27T13:24:38,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:38,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:38,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c4d2b77dbf1645ceb7c1991070ccda33 is 50, key is test_row_0/A:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:38,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742094_1270 (size=12001) 2024-11-27T13:24:38,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c4d2b77dbf1645ceb7c1991070ccda33 2024-11-27T13:24:38,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/975121e1c08e4ae4833d424c6464cda6 is 50, key is test_row_0/B:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:38,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713938578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713938579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713938580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713938581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713938585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742095_1271 (size=12001) 2024-11-27T13:24:38,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/975121e1c08e4ae4833d424c6464cda6 2024-11-27T13:24:38,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/daece1105b78409c90dbe82ab635acc5 is 50, key is test_row_0/C:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:38,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742096_1272 (size=12001) 2024-11-27T13:24:38,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713938683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713938684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713938685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713938687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713938698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713938885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713938886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713938888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713938890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:38,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:38,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713938901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/daece1105b78409c90dbe82ab635acc5 2024-11-27T13:24:39,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c4d2b77dbf1645ceb7c1991070ccda33 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33 2024-11-27T13:24:39,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T13:24:39,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/975121e1c08e4ae4833d424c6464cda6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6 2024-11-27T13:24:39,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T13:24:39,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/daece1105b78409c90dbe82ab635acc5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5 2024-11-27T13:24:39,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5, entries=150, sequenceid=53, filesize=11.7 K 2024-11-27T13:24:39,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 160ba87e97489a540350dc572e5f397d in 479ms, sequenceid=53, compaction requested=true 2024-11-27T13:24:39,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:39,039 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:39,040 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:39,041 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:39,041 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction 
(all files) 2024-11-27T13:24:39,041 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:39,042 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.2 K 2024-11-27T13:24:39,042 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:39,042 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:39,042 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
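[Editor's note] The ExploringCompactionPolicy lines above report that three ~11.7 K flush files (36003 bytes total) were selected after considering one permutation with all files "in ratio". As a rough illustration of what an in-ratio check means, here is a simplified, self-contained sketch; it is not the actual HBase policy code, and the individual file sizes and the 1.2 ratio below are assumed values chosen only to match the totals in this log.

```java
import java.util.Arrays;
import java.util.List;

// Simplified sketch of a ratio check for minor-compaction candidates:
// a file is "in ratio" if it is no larger than ratio * (sum of the other candidates).
public class RatioCheckSketch {
    static boolean allInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false; // this file dwarfs the rest; not a good minor-compaction candidate
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three flush files of roughly 11.7 K each, totalling 36003 bytes as in the log above.
        List<Long> candidates = Arrays.asList(12001L, 11898L, 12104L);
        System.out.println(allInRatio(candidates, 1.2)); // prints true -> eligible for compaction
    }
}
```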
2024-11-27T13:24:39,042 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff8ee2d22c51461b826a2a6a5eb5e815, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732713875290 2024-11-27T13:24:39,042 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.2 K 2024-11-27T13:24:39,043 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dd0e46b42a149049f8670c7cb317eef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732713875290 2024-11-27T13:24:39,043 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24b37896175a4a5694786c894fb4f2df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713875313 2024-11-27T13:24:39,043 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4d2b77dbf1645ceb7c1991070ccda33, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:39,043 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c6617569dd4d49ca919d876c31b7ed8f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713875313 2024-11-27T13:24:39,044 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 975121e1c08e4ae4833d424c6464cda6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:39,055 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:39,056 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8ff886562e994e70b39744e37064932b is 50, key is test_row_0/A:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:39,070 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#224 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:39,071 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c31f30786d764146a7657f6c72b54f7f is 50, key is test_row_0/B:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:39,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742097_1273 (size=12104) 2024-11-27T13:24:39,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742098_1274 (size=12104) 2024-11-27T13:24:39,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:39,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:39,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c7331aefbebb473f94fdc8b8a62cd7b9 is 50, key is test_row_0/A:col10/1732713879190/Put/seqid=0 2024-11-27T13:24:39,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742099_1275 (size=16681) 2024-11-27T13:24:39,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713939204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713939205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713939235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713939235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713939235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713939336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713939340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713939340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713939342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-27T13:24:39,385 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-27T13:24:39,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:39,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-27T13:24:39,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:39,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:39,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:39,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:39,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:39,508 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8ff886562e994e70b39744e37064932b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8ff886562e994e70b39744e37064932b 2024-11-27T13:24:39,513 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 
8ff886562e994e70b39744e37064932b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:39,513 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:39,513 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713879039; duration=0sec 2024-11-27T13:24:39,514 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:39,514 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:39,514 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:39,515 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:39,515 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:39,515 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
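[Editor's note] The pid=77 FlushTableProcedure seen above was created in response to a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and the master then dispatched a per-region FlushRegionProcedure (pid=78). A minimal sketch of issuing such a flush through the public HBase client API follows; the connection settings are assumed, and the real test drives this through its own harness rather than a standalone program.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: ask the master to flush all regions of a table. In this log the master
// handles the request as a FlushTableProcedure with FlushRegionProcedure children.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```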
2024-11-27T13:24:39,515 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.2 K 2024-11-27T13:24:39,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c9d4840f89f48a5b5c6bc47f7fffcf1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732713875290 2024-11-27T13:24:39,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2cc3a0ba1de421c9d4b4b74a56a5f2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732713875313 2024-11-27T13:24:39,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting daece1105b78409c90dbe82ab635acc5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:39,528 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:39,529 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/ac2a0acc4fb948129477fdff80231731 is 50, key is test_row_0/C:col10/1732713876436/Put/seqid=0 2024-11-27T13:24:39,532 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c31f30786d764146a7657f6c72b54f7f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c31f30786d764146a7657f6c72b54f7f 2024-11-27T13:24:39,537 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into c31f30786d764146a7657f6c72b54f7f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
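[Editor's note] The PressureAwareThroughputController entries report each compaction's average throughput against a 50.00 MB/second total limit, with zero sleeps here because the limit was never reached. The following is a generic, self-contained sketch of that style of throttling, not HBase's actual controller; the chunk size and limit in main are made-up illustration values.

```java
// Sketch of rate limiting a writer: after each chunk, sleep long enough that the
// cumulative write rate stays at or below maxBytesPerSecond.
public class ThroughputLimiterSketch {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;

    public ThroughputLimiterSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minimumSec = bytesWritten / maxBytesPerSecond;
        long sleepMs = (long) ((minimumSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs); // the "slept N time(s)" counter in the log counts sleeps like this
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            limiter.control(1024 * 1024); // pretend a 1 MB chunk was just written
        }
    }
}
```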
2024-11-27T13:24:39,537 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:39,537 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713879040; duration=0sec 2024-11-27T13:24:39,537 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:39,537 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:39,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713939539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:39,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-27T13:24:39,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
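[Editor's note] The recurring RegionTooBusyException warnings show writers being pushed back while the region's memstore is over its 512 K blocking limit. The stock HBase client already retries this exception internally according to its retry settings, and may only surface it to application code once those retries are exhausted; purely as an illustration of explicit backoff around a single put (table, row, and column names taken from this test, retry counts and values assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: retry a put with exponential backoff when the region reports it is too busy,
// giving flushes and compactions time to drain the memstore.
public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                              // write accepted
                } catch (RegionTooBusyException busy) { // memstore over its blocking limit
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```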
2024-11-27T13:24:39,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:39,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:39,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:39,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:39,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:39,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713939543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713939544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742100_1276 (size=12104) 2024-11-27T13:24:39,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713939546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,552 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/ac2a0acc4fb948129477fdff80231731 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/ac2a0acc4fb948129477fdff80231731 2024-11-27T13:24:39,557 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into ac2a0acc4fb948129477fdff80231731(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
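[Editor's note] The "Over memstore limit=512.0 K" figure in these warnings is the region's blocking memstore size: the configured flush size multiplied by the block multiplier, so this test is evidently running with a far smaller flush size than the 128 MB default. A hedged sketch of reading those two settings and computing the threshold follows; the 128 KB flush size and multiplier of 4 below are assumptions chosen only to reproduce the 512 K value, not taken from the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: a region blocks writes once its memstore exceeds
// hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style overrides; 128 KB * 4 = 512 KB, matching the limit in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}
```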
2024-11-27T13:24:39,557 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:39,557 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713879040; duration=0sec 2024-11-27T13:24:39,557 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:39,557 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:39,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c7331aefbebb473f94fdc8b8a62cd7b9 2024-11-27T13:24:39,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/699d19a808a24c5696e98cbfae8b248d is 50, key is test_row_0/B:col10/1732713879190/Put/seqid=0 2024-11-27T13:24:39,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742101_1277 (size=12001) 2024-11-27T13:24:39,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/699d19a808a24c5696e98cbfae8b248d 2024-11-27T13:24:39,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/124a90119a234d83b958464da7af0d01 is 50, key is test_row_0/C:col10/1732713879190/Put/seqid=0 2024-11-27T13:24:39,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742102_1278 (size=12001) 2024-11-27T13:24:39,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/124a90119a234d83b958464da7af0d01 2024-11-27T13:24:39,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c7331aefbebb473f94fdc8b8a62cd7b9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9 2024-11-27T13:24:39,669 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9, entries=250, sequenceid=77, filesize=16.3 K 2024-11-27T13:24:39,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/699d19a808a24c5696e98cbfae8b248d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d 2024-11-27T13:24:39,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T13:24:39,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/124a90119a234d83b958464da7af0d01 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01 2024-11-27T13:24:39,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T13:24:39,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 160ba87e97489a540350dc572e5f397d in 495ms, sequenceid=77, compaction requested=false 2024-11-27T13:24:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:39,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:39,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:39,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-27T13:24:39,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:39,695 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:39,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:39,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e4adb651eb2d45b6831cf4506e9a9d8d is 50, key is test_row_0/A:col10/1732713879204/Put/seqid=0 2024-11-27T13:24:39,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742103_1279 (size=12001) 2024-11-27T13:24:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:39,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:39,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713939805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713939843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713939847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713939848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713939850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:39,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713939908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:40,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713940112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,114 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e4adb651eb2d45b6831cf4506e9a9d8d 2024-11-27T13:24:40,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/190590559e514ed7b488f02a1f544fb3 is 50, key is test_row_0/B:col10/1732713879204/Put/seqid=0 2024-11-27T13:24:40,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742104_1280 (size=12001) 2024-11-27T13:24:40,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713940348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713940351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713940351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713940353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713940414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:40,528 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/190590559e514ed7b488f02a1f544fb3 2024-11-27T13:24:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5f238d5112854c7388e555a619f5c6be is 50, key is test_row_0/C:col10/1732713879204/Put/seqid=0 2024-11-27T13:24:40,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742105_1281 (size=12001) 2024-11-27T13:24:40,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:40,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713940917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:40,945 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5f238d5112854c7388e555a619f5c6be 2024-11-27T13:24:40,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e4adb651eb2d45b6831cf4506e9a9d8d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d 2024-11-27T13:24:40,955 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d, entries=150, sequenceid=92, filesize=11.7 K 2024-11-27T13:24:40,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/190590559e514ed7b488f02a1f544fb3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3 2024-11-27T13:24:40,960 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3, entries=150, sequenceid=92, filesize=11.7 K 2024-11-27T13:24:40,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5f238d5112854c7388e555a619f5c6be as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be 2024-11-27T13:24:40,969 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be, entries=150, sequenceid=92, filesize=11.7 K 2024-11-27T13:24:40,970 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 160ba87e97489a540350dc572e5f397d in 1275ms, sequenceid=92, compaction requested=true 2024-11-27T13:24:40,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:40,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:40,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-27T13:24:40,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-27T13:24:40,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-27T13:24:40,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5840 sec 2024-11-27T13:24:40,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.5880 sec 2024-11-27T13:24:41,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:41,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:24:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:41,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b3a3293d7be84037820337117ad38227 is 50, key is test_row_0/A:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:41,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742106_1282 (size=12001) 2024-11-27T13:24:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713941364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713941371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713941372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713941372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713941473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713941473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713941475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713941476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-27T13:24:41,492 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-27T13:24:41,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:41,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-27T13:24:41,499 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:41,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T13:24:41,499 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:41,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:41,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=79 2024-11-27T13:24:41,651 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:41,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T13:24:41,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:41,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:41,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713941676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713941676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713941679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713941680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b3a3293d7be84037820337117ad38227 2024-11-27T13:24:41,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/3efa0ec56ea8459384dcb65dedd49c0b is 50, key is test_row_0/B:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:41,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T13:24:41,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:41,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T13:24:41,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:41,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742107_1283 (size=12001) 2024-11-27T13:24:41,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713941926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,957 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:41,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T13:24:41,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:41,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:41,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:41,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:41,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713941980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713941980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713941983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:41,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713941984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T13:24:42,111 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:42,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-27T13:24:42,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:42,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:42,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:42,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:42,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:42,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:42,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/3efa0ec56ea8459384dcb65dedd49c0b 2024-11-27T13:24:42,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d977de62d04d4c5a8a401496d98bf314 is 50, key is test_row_0/C:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:42,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742108_1284 (size=12001) 2024-11-27T13:24:42,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d977de62d04d4c5a8a401496d98bf314 2024-11-27T13:24:42,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b3a3293d7be84037820337117ad38227 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227 2024-11-27T13:24:42,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T13:24:42,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/3efa0ec56ea8459384dcb65dedd49c0b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b 2024-11-27T13:24:42,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b, entries=150, sequenceid=117, filesize=11.7 K 
2024-11-27T13:24:42,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d977de62d04d4c5a8a401496d98bf314 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314 2024-11-27T13:24:42,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T13:24:42,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 160ba87e97489a540350dc572e5f397d in 888ms, sequenceid=117, compaction requested=true 2024-11-27T13:24:42,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:42,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:42,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:42,244 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:42,244 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:42,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:42,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:42,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:42,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:42,246 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:42,246 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:42,246 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in 
TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:42,246 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c31f30786d764146a7657f6c72b54f7f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=47.0 K 2024-11-27T13:24:42,246 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:42,246 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:42,246 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:42,247 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8ff886562e994e70b39744e37064932b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=51.5 K 2024-11-27T13:24:42,247 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ff886562e994e70b39744e37064932b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:42,247 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c31f30786d764146a7657f6c72b54f7f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:42,247 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7331aefbebb473f94fdc8b8a62cd7b9, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713878577 2024-11-27T13:24:42,248 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4adb651eb2d45b6831cf4506e9a9d8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713879201 2024-11-27T13:24:42,248 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3a3293d7be84037820337117ad38227, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:42,248 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 699d19a808a24c5696e98cbfae8b248d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713878577 2024-11-27T13:24:42,248 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 190590559e514ed7b488f02a1f544fb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713879201 2024-11-27T13:24:42,249 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3efa0ec56ea8459384dcb65dedd49c0b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:42,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:42,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 
2024-11-27T13:24:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:42,265 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:42,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:42,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:42,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:42,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:42,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:42,274 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:42,275 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8bb12e4401cf49269901a05bd03dfcab is 50, key is test_row_0/A:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:42,282 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:42,283 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/0faaa22b93764825896ef4f06048138c is 50, key is test_row_0/B:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:42,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/a6b0070f57824ecc8949d63ce1511135 is 50, key is test_row_0/A:col10/1732713881365/Put/seqid=0 2024-11-27T13:24:42,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742110_1286 (size=12241) 2024-11-27T13:24:42,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742111_1287 (size=12001) 2024-11-27T13:24:42,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742109_1285 (size=12241) 2024-11-27T13:24:42,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:42,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:42,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713942514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713942515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713942516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713942517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T13:24:42,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713942618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713942620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713942620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713942620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,707 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/a6b0070f57824ecc8949d63ce1511135 2024-11-27T13:24:42,708 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/0faaa22b93764825896ef4f06048138c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0faaa22b93764825896ef4f06048138c 2024-11-27T13:24:42,716 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8bb12e4401cf49269901a05bd03dfcab as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8bb12e4401cf49269901a05bd03dfcab 2024-11-27T13:24:42,717 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 0faaa22b93764825896ef4f06048138c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:42,717 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:42,717 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=12, startTime=1732713882244; duration=0sec 2024-11-27T13:24:42,717 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:42,718 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:42,718 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:42,720 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:42,720 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:42,721 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:42,721 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/ac2a0acc4fb948129477fdff80231731, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=47.0 K 2024-11-27T13:24:42,721 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ac2a0acc4fb948129477fdff80231731, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732713876436 2024-11-27T13:24:42,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 8bb12e4401cf49269901a05bd03dfcab(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 124a90119a234d83b958464da7af0d01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713878577 2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:42,722 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=12, startTime=1732713882244; duration=0sec 2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f238d5112854c7388e555a619f5c6be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713879201 2024-11-27T13:24:42,722 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d977de62d04d4c5a8a401496d98bf314, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:42,731 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#238 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:42,732 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/78f5f377f2484072ae31403d2a93ccbb is 50, key is test_row_0/C:col10/1732713879803/Put/seqid=0 2024-11-27T13:24:42,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1e702d6b9f92498499f0750cd4cabcce is 50, key is test_row_0/B:col10/1732713881365/Put/seqid=0 2024-11-27T13:24:42,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742112_1288 (size=12241) 2024-11-27T13:24:42,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742113_1289 (size=12001) 2024-11-27T13:24:42,752 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/78f5f377f2484072ae31403d2a93ccbb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/78f5f377f2484072ae31403d2a93ccbb 2024-11-27T13:24:42,753 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1e702d6b9f92498499f0750cd4cabcce 2024-11-27T13:24:42,759 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into 78f5f377f2484072ae31403d2a93ccbb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:42,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:42,759 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=12, startTime=1732713882245; duration=0sec 2024-11-27T13:24:42,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:42,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:42,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/7f62c10de9484ef684dedd67f04a7b5a is 50, key is test_row_0/C:col10/1732713881365/Put/seqid=0 2024-11-27T13:24:42,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742114_1290 (size=12001) 2024-11-27T13:24:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713942822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713942822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713942822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:42,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:42,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713942824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713943126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713943126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713943126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713943129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,165 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/7f62c10de9484ef684dedd67f04a7b5a 2024-11-27T13:24:43,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/a6b0070f57824ecc8949d63ce1511135 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135 2024-11-27T13:24:43,174 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135, entries=150, sequenceid=128, filesize=11.7 K 2024-11-27T13:24:43,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1e702d6b9f92498499f0750cd4cabcce as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce 2024-11-27T13:24:43,179 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce, entries=150, sequenceid=128, filesize=11.7 K 2024-11-27T13:24:43,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/7f62c10de9484ef684dedd67f04a7b5a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a 2024-11-27T13:24:43,184 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a, entries=150, sequenceid=128, filesize=11.7 K 2024-11-27T13:24:43,184 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 160ba87e97489a540350dc572e5f397d in 919ms, sequenceid=128, compaction requested=false 2024-11-27T13:24:43,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:43,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:43,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-27T13:24:43,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-27T13:24:43,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-27T13:24:43,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6870 sec 2024-11-27T13:24:43,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.6940 sec 2024-11-27T13:24:43,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-27T13:24:43,603 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-27T13:24:43,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:43,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-27T13:24:43,606 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:43,606 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:43,607 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:43,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:43,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T13:24:43,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:43,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:43,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:43,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:43,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:43,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:43,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4533) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4953) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4947) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4943) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3233) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713943639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713943640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713943642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713943642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b35d9d02dcd64e978cbf34d470321afd is 50, key is test_row_0/A:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742115_1291 (size=12151) 2024-11-27T13:24:43,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b35d9d02dcd64e978cbf34d470321afd 2024-11-27T13:24:43,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/20d5661711354b4ab7cb587d186989db is 50, key is test_row_0/B:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:43,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742116_1292 (size=12151) 2024-11-27T13:24:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:43,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713943742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713943743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713943744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713943745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,759 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:43,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:43,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:43,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:43,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:43,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:43,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:43,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:43,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:43,911 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:43,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:43,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:43,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:43,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:43,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:43,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:43,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:43,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713943943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,944 DEBUG [Thread-1207 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:43,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713943945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713943946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713943949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:43,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:43,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713943949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,064 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:44,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:44,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:44,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/20d5661711354b4ab7cb587d186989db 2024-11-27T13:24:44,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5680499b10d341b494d8089af8b46da2 is 50, key is test_row_0/C:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:44,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742117_1293 (size=12151) 2024-11-27T13:24:44,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:44,217 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:44,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:44,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:44,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713944247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713944250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713944252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713944253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,370 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:44,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:44,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:44,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:44,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5680499b10d341b494d8089af8b46da2 2024-11-27T13:24:44,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/b35d9d02dcd64e978cbf34d470321afd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd 2024-11-27T13:24:44,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd, entries=150, sequenceid=160, filesize=11.9 K 2024-11-27T13:24:44,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/20d5661711354b4ab7cb587d186989db as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db 2024-11-27T13:24:44,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db, entries=150, sequenceid=160, filesize=11.9 K 2024-11-27T13:24:44,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5680499b10d341b494d8089af8b46da2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2 2024-11-27T13:24:44,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2, entries=150, sequenceid=160, filesize=11.9 K 2024-11-27T13:24:44,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:44,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:44,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:44,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:44,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:44,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 160ba87e97489a540350dc572e5f397d in 890ms, sequenceid=160, compaction requested=true 2024-11-27T13:24:44,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:44,525 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:44,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
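Note on the pid=82 failure loop above: each re-dispatched FlushRegionCallable finds the region already flushing (the "NOT flushing ... as already flushing" DEBUG entries), reports "Unable to complete flush" back to the master, and the master re-queues and re-dispatches the remote procedure; the cycle ends once the in-progress flush drains, and the successful completion of pid=82 is recorded further down in this log.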
2024-11-27T13:24:44,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:44,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:44,525 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:44,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:44,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:44,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:44,526 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:44,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:44,526 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:44,526 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:44,526 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8bb12e4401cf49269901a05bd03dfcab, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.5 K 2024-11-27T13:24:44,526 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bb12e4401cf49269901a05bd03dfcab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:44,526 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:44,527 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:44,527 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:44,527 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0faaa22b93764825896ef4f06048138c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.5 K 2024-11-27T13:24:44,527 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6b0070f57824ecc8949d63ce1511135, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732713881361 2024-11-27T13:24:44,527 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0faaa22b93764825896ef4f06048138c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:44,527 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b35d9d02dcd64e978cbf34d470321afd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:44,527 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e702d6b9f92498499f0750cd4cabcce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732713881361 2024-11-27T13:24:44,528 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 20d5661711354b4ab7cb587d186989db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:44,535 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:44,535 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9b480c202b5f44f9b0a6e7e43b589a90 is 50, key is test_row_0/A:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:44,538 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:44,539 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/174bdf937b70414fa5d63411e468dae5 is 50, key is test_row_0/B:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:44,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742118_1294 (size=12493) 2024-11-27T13:24:44,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742119_1295 (size=12493) 2024-11-27T13:24:44,549 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/174bdf937b70414fa5d63411e468dae5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/174bdf937b70414fa5d63411e468dae5 2024-11-27T13:24:44,556 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 174bdf937b70414fa5d63411e468dae5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:44,556 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:44,556 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713884525; duration=0sec 2024-11-27T13:24:44,556 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:44,556 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:44,556 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:44,557 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:44,558 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:44,558 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:44,558 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/78f5f377f2484072ae31403d2a93ccbb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.5 K 2024-11-27T13:24:44,558 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 78f5f377f2484072ae31403d2a93ccbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713879800 2024-11-27T13:24:44,559 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f62c10de9484ef684dedd67f04a7b5a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732713881361 2024-11-27T13:24:44,559 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5680499b10d341b494d8089af8b46da2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:44,567 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:44,567 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/b81ee4b872974daebc5b8b55f4324464 is 50, key is test_row_0/C:col10/1732713882513/Put/seqid=0 2024-11-27T13:24:44,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742120_1296 (size=12493) 2024-11-27T13:24:44,579 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/b81ee4b872974daebc5b8b55f4324464 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/b81ee4b872974daebc5b8b55f4324464 2024-11-27T13:24:44,591 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into b81ee4b872974daebc5b8b55f4324464(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
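The "Exploring compaction algorithm has selected 3 files of size 36393" entries above come from a size-ratio selection: every candidate file must be no larger than the configured ratio times the combined size of the other candidates. The following is a simplified, hypothetical illustration of that check, not the actual HBase ExploringCompactionPolicy code; the class name and per-file sizes are illustrative (chosen to total the 36393 bytes reported above, roughly the 12.0 K, 11.7 K and 11.9 K A-store files), and 1.2 is the usual default for hbase.hstore.compaction.ratio.

    import java.util.List;

    public class RatioSelectionSketch {
        // True when every file is at most ratio * (sum of the other files),
        // the same kind of "in ratio" test the compaction log lines refer to.
        static boolean withinRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Illustrative sizes totalling 36393 bytes, like the three A-store files above.
            List<Long> candidates = List.of(12_288L, 11_980L, 12_125L);
            System.out.println(withinRatio(candidates, 1.2)); // true -> all three compacted together
        }
    }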
2024-11-27T13:24:44,591 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:44,591 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713884526; duration=0sec 2024-11-27T13:24:44,591 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:44,591 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:44,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:44,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:44,677 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:44,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:44,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9cd1805094b34a81a38f657c74c1e4a7 is 50, key is test_row_0/A:col10/1732713883641/Put/seqid=0 2024-11-27T13:24:44,689 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742121_1297 (size=12151) 2024-11-27T13:24:44,689 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9cd1805094b34a81a38f657c74c1e4a7 2024-11-27T13:24:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5aeb276d7a284d628363e874f73b55f2 is 50, key is test_row_0/B:col10/1732713883641/Put/seqid=0 2024-11-27T13:24:44,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742122_1298 (size=12151) 2024-11-27T13:24:44,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:44,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:44,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713944776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713944776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713944777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713944778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713944880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713944880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713944880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713944881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:44,948 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9b480c202b5f44f9b0a6e7e43b589a90 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9b480c202b5f44f9b0a6e7e43b589a90 2024-11-27T13:24:44,954 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 9b480c202b5f44f9b0a6e7e43b589a90(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:44,954 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:44,954 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713884524; duration=0sec 2024-11-27T13:24:44,954 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:44,954 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:45,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713945083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713945083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713945083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713945092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,105 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5aeb276d7a284d628363e874f73b55f2 2024-11-27T13:24:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/e6c4af664e50431880abeca575377935 is 50, key is test_row_0/C:col10/1732713883641/Put/seqid=0 2024-11-27T13:24:45,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742123_1299 (size=12151) 2024-11-27T13:24:45,118 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/e6c4af664e50431880abeca575377935 2024-11-27T13:24:45,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9cd1805094b34a81a38f657c74c1e4a7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7 2024-11-27T13:24:45,126 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7, entries=150, sequenceid=170, filesize=11.9 K 2024-11-27T13:24:45,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5aeb276d7a284d628363e874f73b55f2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2 2024-11-27T13:24:45,132 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2, entries=150, sequenceid=170, filesize=11.9 K 2024-11-27T13:24:45,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/e6c4af664e50431880abeca575377935 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935 2024-11-27T13:24:45,137 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935, entries=150, sequenceid=170, filesize=11.9 K 2024-11-27T13:24:45,141 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 160ba87e97489a540350dc572e5f397d in 463ms, sequenceid=170, compaction requested=false 2024-11-27T13:24:45,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:45,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
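The RegionTooBusyException warnings interleaved through this stretch are server-side rejections raised by HRegion.checkResources while the region's memstore is over its 512 K blocking limit; writers are expected to back off and retry once the flush above drains the memstore. The stock HBase client already retries these internally, so the sketch below is only a minimal, hypothetical illustration of doing the backoff explicitly (assuming client-side retries are turned down so the exception reaches the caller); the class name, row, value and retry bounds are illustrative, while the table TestAcidGuarantees, family A and qualifier col10 match the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);      // accepted once the memstore drops below the limit
                        break;
                    } catch (RegionTooBusyException e) {
                        // Same condition as the "Over memstore limit=512.0 K" entries above.
                        if (attempt >= 5) {
                            throw e;         // give up after a bounded number of attempts
                        }
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;      // exponential backoff before retrying
                    }
                }
            }
        }
    }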
2024-11-27T13:24:45,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-27T13:24:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-27T13:24:45,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-27T13:24:45,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-11-27T13:24:45,145 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.5390 sec 2024-11-27T13:24:45,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:45,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:45,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713945392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713945394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713945394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9290e02f13ed40b59fb21ee7859b1498 is 50, key is test_row_0/A:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713945396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742124_1300 (size=12151) 2024-11-27T13:24:45,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713945496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713945496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713945496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713945698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713945699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713945700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-27T13:24:45,711 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-27T13:24:45,712 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-27T13:24:45,714 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T13:24:45,714 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:45,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:45,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9290e02f13ed40b59fb21ee7859b1498 2024-11-27T13:24:45,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c0b3303b2627445aa4a40be0eafd25a3 is 50, key is test_row_0/B:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T13:24:45,816 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742125_1301 (size=12151) 2024-11-27T13:24:45,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c0b3303b2627445aa4a40be0eafd25a3 2024-11-27T13:24:45,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/6b791bea7a624570ba177d1c45d4dd29 is 50, key is test_row_0/C:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742126_1302 (size=12151) 2024-11-27T13:24:45,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/6b791bea7a624570ba177d1c45d4dd29 2024-11-27T13:24:45,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/9290e02f13ed40b59fb21ee7859b1498 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498 2024-11-27T13:24:45,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498, entries=150, sequenceid=200, filesize=11.9 K 2024-11-27T13:24:45,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c0b3303b2627445aa4a40be0eafd25a3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3 2024-11-27T13:24:45,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3, entries=150, sequenceid=200, filesize=11.9 K 2024-11-27T13:24:45,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/6b791bea7a624570ba177d1c45d4dd29 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29 2024-11-27T13:24:45,848 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29, entries=150, sequenceid=200, filesize=11.9 K 2024-11-27T13:24:45,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 160ba87e97489a540350dc572e5f397d in 461ms, sequenceid=200, compaction requested=true 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:45,849 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:45,849 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:45,850 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:45,850 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:45,850 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:45,850 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:45,850 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:45,850 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:45,851 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/174bdf937b70414fa5d63411e468dae5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.9 K 2024-11-27T13:24:45,851 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9b480c202b5f44f9b0a6e7e43b589a90, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.9 K 2024-11-27T13:24:45,851 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 174bdf937b70414fa5d63411e468dae5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:45,852 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b480c202b5f44f9b0a6e7e43b589a90, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:45,852 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aeb276d7a284d628363e874f73b55f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732713883636 2024-11-27T13:24:45,852 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cd1805094b34a81a38f657c74c1e4a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732713883636 2024-11-27T13:24:45,852 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c0b3303b2627445aa4a40be0eafd25a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 2024-11-27T13:24:45,853 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9290e02f13ed40b59fb21ee7859b1498, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 
2024-11-27T13:24:45,862 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:45,863 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/dfb4264917f543309e794383b10d2a13 is 50, key is test_row_0/A:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,866 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:45,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:45,867 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/41786ab74029462980ed1dfb9a0376e0 is 50, key is test_row_0/B:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-27T13:24:45,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:45,867 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:45,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/697e11f83acd46d8a9a32091abd7facd is 50, key is test_row_0/A:col10/1732713885389/Put/seqid=0 2024-11-27T13:24:45,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742127_1303 (size=12595) 2024-11-27T13:24:45,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742128_1304 (size=12595) 2024-11-27T13:24:45,906 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/41786ab74029462980ed1dfb9a0376e0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/41786ab74029462980ed1dfb9a0376e0 2024-11-27T13:24:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:45,911 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 41786ab74029462980ed1dfb9a0376e0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:45,911 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:45,911 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713885849; duration=0sec 2024-11-27T13:24:45,911 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:45,911 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:45,911 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:45,912 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:45,913 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:45,913 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:45,913 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/b81ee4b872974daebc5b8b55f4324464, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=35.9 K 2024-11-27T13:24:45,914 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b81ee4b872974daebc5b8b55f4324464, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1732713882513 2024-11-27T13:24:45,914 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e6c4af664e50431880abeca575377935, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732713883636 2024-11-27T13:24:45,914 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b791bea7a624570ba177d1c45d4dd29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 2024-11-27T13:24:45,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742129_1305 (size=12151) 2024-11-27T13:24:45,920 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/697e11f83acd46d8a9a32091abd7facd 2024-11-27T13:24:45,925 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:45,926 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/38b9fe95c3df44f88f8be2a3448f2a79 is 50, key is test_row_0/C:col10/1732713884771/Put/seqid=0 2024-11-27T13:24:45,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/71eaac90b06d4d8d968b3d115f99a424 is 50, key is test_row_0/B:col10/1732713885389/Put/seqid=0 2024-11-27T13:24:45,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742130_1306 (size=12595) 2024-11-27T13:24:45,946 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/38b9fe95c3df44f88f8be2a3448f2a79 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/38b9fe95c3df44f88f8be2a3448f2a79 2024-11-27T13:24:45,951 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into 38b9fe95c3df44f88f8be2a3448f2a79(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:45,951 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:45,951 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713885849; duration=0sec 2024-11-27T13:24:45,951 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:45,951 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:45,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742131_1307 (size=12151) 2024-11-27T13:24:45,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713945985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713946001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713946003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713946003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T13:24:46,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713946088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713946290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,305 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/dfb4264917f543309e794383b10d2a13 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/dfb4264917f543309e794383b10d2a13 2024-11-27T13:24:46,315 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into dfb4264917f543309e794383b10d2a13(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:46,315 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:46,315 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713885849; duration=0sec 2024-11-27T13:24:46,315 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:46,315 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:46,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T13:24:46,362 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/71eaac90b06d4d8d968b3d115f99a424 2024-11-27T13:24:46,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5bcefc44353944d98b185493cbafb9a2 is 50, key is test_row_0/C:col10/1732713885389/Put/seqid=0 2024-11-27T13:24:46,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742132_1308 (size=12151) 2024-11-27T13:24:46,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713946503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713946505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713946509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713946592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:46,776 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5bcefc44353944d98b185493cbafb9a2 2024-11-27T13:24:46,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/697e11f83acd46d8a9a32091abd7facd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd 2024-11-27T13:24:46,785 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd, entries=150, sequenceid=208, filesize=11.9 K 2024-11-27T13:24:46,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/71eaac90b06d4d8d968b3d115f99a424 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424 2024-11-27T13:24:46,790 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424, entries=150, sequenceid=208, filesize=11.9 K 2024-11-27T13:24:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/5bcefc44353944d98b185493cbafb9a2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2 2024-11-27T13:24:46,794 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2, entries=150, sequenceid=208, filesize=11.9 K 2024-11-27T13:24:46,795 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 160ba87e97489a540350dc572e5f397d in 928ms, sequenceid=208, compaction requested=false 2024-11-27T13:24:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:46,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-27T13:24:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-27T13:24:46,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-27T13:24:46,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0830 sec 2024-11-27T13:24:46,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.0880 sec 2024-11-27T13:24:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-27T13:24:46,817 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-27T13:24:46,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-27T13:24:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T13:24:46,821 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:46,821 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:46,821 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:46,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T13:24:46,973 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:46,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:46,974 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:46,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:46,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:46,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/af5088c90a5a4b2b844790c77ba53494 is 50, key is test_row_0/A:col10/1732713885983/Put/seqid=0 2024-11-27T13:24:47,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742133_1309 (size=12151) 
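The FlushTableProcedure/FlushRegionProcedure pairs in this section (pid=83/84, 85/86 and, further below, 87/88) are the server side of client-requested flushes ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", "Operation: FLUSH ... completed"). A minimal sketch of issuing such a flush with the standard Admin API follows; only the table name is taken from the log, the connection setup is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Waits for the master-side FlushTableProcedure and its per-region
      // FlushRegionProcedure subprocedure to finish, which is what the repeated
      // "Checking to see if procedure is done" entries in this log reflect.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}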
2024-11-27T13:24:47,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:47,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:47,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713947109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T13:24:47,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713947211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,403 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/af5088c90a5a4b2b844790c77ba53494 2024-11-27T13:24:47,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/f80c1e82fecc4f71be58b39f85e2d964 is 50, key is test_row_0/B:col10/1732713885983/Put/seqid=0 2024-11-27T13:24:47,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713947413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742134_1310 (size=12151) 2024-11-27T13:24:47,416 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/f80c1e82fecc4f71be58b39f85e2d964 2024-11-27T13:24:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T13:24:47,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/09b211349d4243cfb4ef1363f202cf21 is 50, key is test_row_0/C:col10/1732713885983/Put/seqid=0 2024-11-27T13:24:47,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742135_1311 (size=12151) 2024-11-27T13:24:47,431 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/09b211349d4243cfb4ef1363f202cf21 2024-11-27T13:24:47,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/af5088c90a5a4b2b844790c77ba53494 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494 2024-11-27T13:24:47,439 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T13:24:47,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/f80c1e82fecc4f71be58b39f85e2d964 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964 2024-11-27T13:24:47,443 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T13:24:47,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/09b211349d4243cfb4ef1363f202cf21 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21 2024-11-27T13:24:47,448 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T13:24:47,449 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 160ba87e97489a540350dc572e5f397d in 475ms, sequenceid=239, compaction requested=true 2024-11-27T13:24:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
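The writes being rejected throughout this section originate from ordinary Table.put() calls; the client-side frames in the retry entry further below (HTable.put -> RpcRetryingCallerImpl.callWithRetries, "tries=7, retries=16") show the HBase client backing off and retrying while the server keeps answering RegionTooBusyException. A minimal sketch of such a writer follows, assuming the standard HBase 2.x client API: row, family, and qualifier are taken from the log keys (test_row_0/A:col10); the value and the retry budget are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleRowWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry budget; the retry entry below reports "tries=7, retries=16".
    conf.setInt("hbase.client.retries.number", 16);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // The client retries internally (RpcRetryingCallerImpl) while the server
      // answers RegionTooBusyException; the exception only reaches this call
      // once the retry budget is exhausted.
      table.put(put);
    }
  }
}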
2024-11-27T13:24:47,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-27T13:24:47,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-27T13:24:47,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-27T13:24:47,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 629 msec 2024-11-27T13:24:47,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 634 msec 2024-11-27T13:24:47,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:47,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:47,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:47,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/49397b4d85ef4c8db88e4ebd36612ef4 is 50, key is test_row_0/A:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742136_1312 (size=12151) 2024-11-27T13:24:47,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/49397b4d85ef4c8db88e4ebd36612ef4 2024-11-27T13:24:47,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713947546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713947547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713947551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/bcfb61ac94db4f03b4d670944d240268 is 50, key is test_row_0/B:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:47,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742137_1313 (size=12151) 2024-11-27T13:24:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713947656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713947656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713947656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713947716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713947859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713947859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713947860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-27T13:24:47,924 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-27T13:24:47,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:47,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-27T13:24:47,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T13:24:47,927 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:47,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:47,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:47,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:47,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36526 deadline: 1732713947957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:47,959 DEBUG [Thread-1207 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:47,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/bcfb61ac94db4f03b4d670944d240268 2024-11-27T13:24:47,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b is 
50, key is test_row_0/C:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:47,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742138_1314 (size=12151) 2024-11-27T13:24:47,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b 2024-11-27T13:24:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/49397b4d85ef4c8db88e4ebd36612ef4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4 2024-11-27T13:24:48,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4, entries=150, sequenceid=251, filesize=11.9 K 2024-11-27T13:24:48,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/bcfb61ac94db4f03b4d670944d240268 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268 2024-11-27T13:24:48,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268, entries=150, sequenceid=251, filesize=11.9 K 2024-11-27T13:24:48,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b 2024-11-27T13:24:48,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b, entries=150, sequenceid=251, filesize=11.9 K 2024-11-27T13:24:48,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 160ba87e97489a540350dc572e5f397d in 500ms, sequenceid=251, compaction requested=true 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:48,017 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:48,018 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:48,024 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:48,024 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:48,024 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:48,024 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:48,025 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:48,025 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:48,025 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/41786ab74029462980ed1dfb9a0376e0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=47.9 K 2024-11-27T13:24:48,025 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/dfb4264917f543309e794383b10d2a13, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=47.9 K 2024-11-27T13:24:48,025 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfb4264917f543309e794383b10d2a13, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 2024-11-27T13:24:48,025 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 41786ab74029462980ed1dfb9a0376e0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 2024-11-27T13:24:48,025 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 71eaac90b06d4d8d968b3d115f99a424, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732713885389 2024-11-27T13:24:48,025 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 697e11f83acd46d8a9a32091abd7facd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732713885389 2024-11-27T13:24:48,026 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting af5088c90a5a4b2b844790c77ba53494, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732713885959 2024-11-27T13:24:48,026 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
f80c1e82fecc4f71be58b39f85e2d964, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732713885959 2024-11-27T13:24:48,026 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49397b4d85ef4c8db88e4ebd36612ef4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:48,026 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bcfb61ac94db4f03b4d670944d240268, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:48,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T13:24:48,037 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:48,037 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:48,037 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/3b72756b92da48fca3e50720bbaa2b85 is 50, key is test_row_0/A:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:48,038 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/40c15f92592c413cbe0839c974511083 is 50, key is test_row_0/B:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:48,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742140_1316 (size=12731) 2024-11-27T13:24:48,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742139_1315 (size=12731) 2024-11-27T13:24:48,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:48,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-27T13:24:48,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:48,082 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/40c15f92592c413cbe0839c974511083 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/40c15f92592c413cbe0839c974511083 2024-11-27T13:24:48,082 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:48,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,089 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 40c15f92592c413cbe0839c974511083(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:48,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:48,089 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=12, startTime=1732713888017; duration=0sec 2024-11-27T13:24:48,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:48,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:48,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:24:48,091 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:24:48,091 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:48,091 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:48,091 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/38b9fe95c3df44f88f8be2a3448f2a79, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=47.9 K 2024-11-27T13:24:48,092 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 38b9fe95c3df44f88f8be2a3448f2a79, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713884771 2024-11-27T13:24:48,092 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bcefc44353944d98b185493cbafb9a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732713885389 2024-11-27T13:24:48,093 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 09b211349d4243cfb4ef1363f202cf21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=239, earliestPutTs=1732713885959 2024-11-27T13:24:48,093 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ba4ee9b5894bd09cf8cca9a4a7cb2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:48,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/cdc25422034f40a1876e4f7d06318932 is 50, key is test_row_0/A:col10/1732713887549/Put/seqid=0 2024-11-27T13:24:48,118 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#268 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:48,119 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f983b429edce4dd2a26ba74c9b040e83 is 50, key is test_row_0/C:col10/1732713887106/Put/seqid=0 2024-11-27T13:24:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742141_1317 (size=12301) 2024-11-27T13:24:48,142 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/cdc25422034f40a1876e4f7d06318932 2024-11-27T13:24:48,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/448e8486c5734fc98e4176c26c0c57fd is 50, key is test_row_0/B:col10/1732713887549/Put/seqid=0 2024-11-27T13:24:48,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742142_1318 (size=12731) 2024-11-27T13:24:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:48,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:48,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742143_1319 (size=12301) 2024-11-27T13:24:48,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713948172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713948174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713948175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713948218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T13:24:48,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713948276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713948278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713948278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,463 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/3b72756b92da48fca3e50720bbaa2b85 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/3b72756b92da48fca3e50720bbaa2b85 2024-11-27T13:24:48,468 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 3b72756b92da48fca3e50720bbaa2b85(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:48,468 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:48,468 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=12, startTime=1732713888017; duration=0sec 2024-11-27T13:24:48,468 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:48,468 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:48,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713948479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713948481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713948481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T13:24:48,567 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f983b429edce4dd2a26ba74c9b040e83 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f983b429edce4dd2a26ba74c9b040e83 2024-11-27T13:24:48,573 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into f983b429edce4dd2a26ba74c9b040e83(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:48,573 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:48,573 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=12, startTime=1732713888017; duration=0sec 2024-11-27T13:24:48,573 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:48,573 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:48,574 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/448e8486c5734fc98e4176c26c0c57fd 2024-11-27T13:24:48,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/46db4dfaeeea4f258b080961cdb1ac6d is 50, key is test_row_0/C:col10/1732713887549/Put/seqid=0 2024-11-27T13:24:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742144_1320 (size=12301) 2024-11-27T13:24:48,588 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/46db4dfaeeea4f258b080961cdb1ac6d 2024-11-27T13:24:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/cdc25422034f40a1876e4f7d06318932 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932 2024-11-27T13:24:48,597 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932, entries=150, sequenceid=275, filesize=12.0 K 2024-11-27T13:24:48,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/448e8486c5734fc98e4176c26c0c57fd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd 2024-11-27T13:24:48,602 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd, entries=150, sequenceid=275, filesize=12.0 K 2024-11-27T13:24:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/46db4dfaeeea4f258b080961cdb1ac6d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d 2024-11-27T13:24:48,607 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d, entries=150, sequenceid=275, filesize=12.0 K 2024-11-27T13:24:48,608 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 160ba87e97489a540350dc572e5f397d in 526ms, sequenceid=275, compaction requested=false 2024-11-27T13:24:48,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:48,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:48,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-27T13:24:48,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-27T13:24:48,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-27T13:24:48,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 681 msec 2024-11-27T13:24:48,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 685 msec 2024-11-27T13:24:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:48,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:48,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:48,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/da3f89ef17894ef18ad98f5bf3e615ba is 50, key is test_row_0/A:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:48,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742145_1321 (size=12301) 2024-11-27T13:24:48,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713948835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713948837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713948836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713948938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713948940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713948940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-27T13:24:49,031 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-27T13:24:49,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-27T13:24:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-27T13:24:49,033 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:49,034 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:49,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:49,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-27T13:24:49,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713949141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713949142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713949145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,190 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:49,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-27T13:24:49,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:49,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:49,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:49,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:49,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/da3f89ef17894ef18ad98f5bf3e615ba 2024-11-27T13:24:49,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2ff9def8aebc48548184c401afb7be8e is 50, key is test_row_0/B:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:49,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713949223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742146_1322 (size=12301) 2024-11-27T13:24:49,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2ff9def8aebc48548184c401afb7be8e 2024-11-27T13:24:49,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/56b95de4f1bc446b99099a67995a4d1b is 50, key is test_row_0/C:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:49,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742147_1323 (size=12301) 2024-11-27T13:24:49,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/56b95de4f1bc446b99099a67995a4d1b 2024-11-27T13:24:49,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/da3f89ef17894ef18ad98f5bf3e615ba as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba 2024-11-27T13:24:49,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba, entries=150, sequenceid=292, filesize=12.0 K 2024-11-27T13:24:49,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2ff9def8aebc48548184c401afb7be8e as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e 2024-11-27T13:24:49,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e, entries=150, sequenceid=292, filesize=12.0 K 2024-11-27T13:24:49,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/56b95de4f1bc446b99099a67995a4d1b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b 2024-11-27T13:24:49,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b, entries=150, sequenceid=292, filesize=12.0 K 2024-11-27T13:24:49,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 160ba87e97489a540350dc572e5f397d in 523ms, sequenceid=292, compaction requested=true 2024-11-27T13:24:49,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:49,308 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:49,309 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:49,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:49,310 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:49,310 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,310 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/3b72756b92da48fca3e50720bbaa2b85, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.5 K 2024-11-27T13:24:49,310 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/40c15f92592c413cbe0839c974511083, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.5 K 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b72756b92da48fca3e50720bbaa2b85, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:49,310 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 40c15f92592c413cbe0839c974511083, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:49,311 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting cdc25422034f40a1876e4f7d06318932, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732713887539 2024-11-27T13:24:49,311 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 448e8486c5734fc98e4176c26c0c57fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732713887539 2024-11-27T13:24:49,312 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff9def8aebc48548184c401afb7be8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:49,312 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting da3f89ef17894ef18ad98f5bf3e615ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:49,326 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:49,327 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/e40057f034994b1ea571aea8391ed0fb is 50, key is test_row_0/B:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:49,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-27T13:24:49,340 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#275 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:49,341 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/4c46d799428341dd87b7d7a056690269 is 50, key is test_row_0/A:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:49,344 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:49,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-27T13:24:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:49,345 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:49,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:49,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/07327cbe06bc4020994dbf078e554333 is 50, key is test_row_0/A:col10/1732713888835/Put/seqid=0 2024-11-27T13:24:49,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742148_1324 (size=12983) 2024-11-27T13:24:49,368 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/e40057f034994b1ea571aea8391ed0fb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/e40057f034994b1ea571aea8391ed0fb 2024-11-27T13:24:49,377 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into e40057f034994b1ea571aea8391ed0fb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:49,377 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:49,377 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713889309; duration=0sec 2024-11-27T13:24:49,377 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:49,377 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:49,377 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:49,379 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:49,379 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:49,379 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,379 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f983b429edce4dd2a26ba74c9b040e83, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.5 K 2024-11-27T13:24:49,380 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f983b429edce4dd2a26ba74c9b040e83, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732713887106 2024-11-27T13:24:49,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742149_1325 (size=12983) 2024-11-27T13:24:49,380 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 46db4dfaeeea4f258b080961cdb1ac6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732713887539 2024-11-27T13:24:49,380 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 56b95de4f1bc446b99099a67995a4d1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:49,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742150_1326 (size=12301) 2024-11-27T13:24:49,382 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/07327cbe06bc4020994dbf078e554333 2024-11-27T13:24:49,387 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/4c46d799428341dd87b7d7a056690269 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/4c46d799428341dd87b7d7a056690269 2024-11-27T13:24:49,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/86bdd34e0447401ab71d8a8fb86e9d98 is 50, key is test_row_0/B:col10/1732713888835/Put/seqid=0 2024-11-27T13:24:49,396 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#278 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:49,396 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d066603af18f418b88d5b59e886ac746 is 50, key is test_row_0/C:col10/1732713888785/Put/seqid=0 2024-11-27T13:24:49,402 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 4c46d799428341dd87b7d7a056690269(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:49,402 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:49,402 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713889308; duration=0sec 2024-11-27T13:24:49,402 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:49,402 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:49,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742151_1327 (size=12301) 2024-11-27T13:24:49,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742152_1328 (size=12983) 2024-11-27T13:24:49,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:49,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713949456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713949458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713949458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713949559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713949561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713949561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-27T13:24:49,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713949762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713949763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713949763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:49,805 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/86bdd34e0447401ab71d8a8fb86e9d98 2024-11-27T13:24:49,810 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/d066603af18f418b88d5b59e886ac746 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d066603af18f418b88d5b59e886ac746 2024-11-27T13:24:49,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/33429a567095425287c4f02174623a72 is 50, key is test_row_0/C:col10/1732713888835/Put/seqid=0 2024-11-27T13:24:49,818 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into d066603af18f418b88d5b59e886ac746(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:49,818 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:49,818 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713889309; duration=0sec 2024-11-27T13:24:49,818 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:49,818 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:49,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742153_1329 (size=12301) 2024-11-27T13:24:49,821 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/33429a567095425287c4f02174623a72 2024-11-27T13:24:49,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/07327cbe06bc4020994dbf078e554333 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333 2024-11-27T13:24:49,828 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T13:24:49,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/86bdd34e0447401ab71d8a8fb86e9d98 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98 2024-11-27T13:24:49,833 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T13:24:49,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/33429a567095425287c4f02174623a72 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72 2024-11-27T13:24:49,837 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72, entries=150, sequenceid=314, filesize=12.0 K 2024-11-27T13:24:49,838 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 160ba87e97489a540350dc572e5f397d in 493ms, sequenceid=314, compaction requested=false 2024-11-27T13:24:49,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:49,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:49,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-27T13:24:49,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-27T13:24:49,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-27T13:24:49,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 805 msec 2024-11-27T13:24:49,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 809 msec 2024-11-27T13:24:50,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:50,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:24:50,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:50,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:50,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:50,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/727eab659bc049b6afb20c5be1dd2e0a is 50, key is test_row_0/A:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742154_1330 (size=12301) 2024-11-27T13:24:50,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/727eab659bc049b6afb20c5be1dd2e0a 2024-11-27T13:24:50,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/a21afcf71ebd4c85aba761ef1ca1525c is 50, key is test_row_0/B:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713950088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713950088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742155_1331 (size=12301) 2024-11-27T13:24:50,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/a21afcf71ebd4c85aba761ef1ca1525c 2024-11-27T13:24:50,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713950091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f8eb10b288854406826688b5e1674d28 is 50, key is test_row_0/C:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742156_1332 (size=12301) 2024-11-27T13:24:50,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-27T13:24:50,137 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-27T13:24:50,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:50,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-27T13:24:50,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T13:24:50,140 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:50,141 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:50,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:50,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713950192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713950193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713950195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T13:24:50,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:50,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-27T13:24:50,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:50,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:50,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:50,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713950394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713950395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713950397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T13:24:50,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:50,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-27T13:24:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:50,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:50,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f8eb10b288854406826688b5e1674d28 2024-11-27T13:24:50,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/727eab659bc049b6afb20c5be1dd2e0a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a 2024-11-27T13:24:50,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a, entries=150, sequenceid=332, filesize=12.0 K 2024-11-27T13:24:50,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/a21afcf71ebd4c85aba761ef1ca1525c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c 2024-11-27T13:24:50,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c, entries=150, sequenceid=332, filesize=12.0 K 2024-11-27T13:24:50,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f8eb10b288854406826688b5e1674d28 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28 2024-11-27T13:24:50,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28, entries=150, sequenceid=332, filesize=12.0 K 2024-11-27T13:24:50,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 160ba87e97489a540350dc572e5f397d in 475ms, sequenceid=332, compaction requested=true 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:50,542 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:50,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:50,542 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:50,543 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:50,544 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:50,544 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
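The entries above show the flush completing and the MemStoreFlusher queueing system compactions for stores A, B and C, after which the ExploringCompactionPolicy selects three store files per store. The same work can also be requested explicitly from a client through the standard Admin API; the following is only a minimal sketch (connection setup is assumed to come from hbase-site.xml and is not taken from this log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction for every store of the table; the region
          // server's CompactSplit threads execute it asynchronously, as seen in the log.
          admin.compact(table);
          // A major compaction would rewrite all store files instead of the
          // subset selected by the ExploringCompactionPolicy above.
          // admin.majorCompact(table);
        }
      }
    }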
2024-11-27T13:24:50,544 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:50,544 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/4c46d799428341dd87b7d7a056690269, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.7 K 2024-11-27T13:24:50,544 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:50,544 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,544 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/e40057f034994b1ea571aea8391ed0fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.7 K 2024-11-27T13:24:50,544 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c46d799428341dd87b7d7a056690269, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:50,544 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e40057f034994b1ea571aea8391ed0fb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:50,545 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07327cbe06bc4020994dbf078e554333, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732713888825 2024-11-27T13:24:50,545 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 86bdd34e0447401ab71d8a8fb86e9d98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732713888825 2024-11-27T13:24:50,545 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 727eab659bc049b6afb20c5be1dd2e0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:50,545 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a21afcf71ebd4c85aba761ef1ca1525c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:50,554 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:50,555 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#284 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:50,555 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/908f605917374a5ab55acddfce590261 is 50, key is test_row_0/B:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,555 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/de244090cf804a37bf94b5abbf84c548 is 50, key is test_row_0/A:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742157_1333 (size=13085) 2024-11-27T13:24:50,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742158_1334 (size=13085) 2024-11-27T13:24:50,570 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/de244090cf804a37bf94b5abbf84c548 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/de244090cf804a37bf94b5abbf84c548 2024-11-27T13:24:50,576 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into de244090cf804a37bf94b5abbf84c548(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
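Each compaction output above is first written under the region's .tmp directory and then committed into the per-family directory (A, B or C). One way to inspect the resulting store-file layout is to list the region directory with the Hadoop FileSystem API; the sketch below reuses the HDFS URI and region path that appear in this log and is illustrative only:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42217"), conf);
        // Region directory taken from the log; each column family (A, B, C) has its own subdirectory.
        Path region = new Path("/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/"
            + "data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d");
        for (String family : new String[] {"A", "B", "C"}) {
          for (FileStatus f : fs.listStatus(new Path(region, family))) {
            System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
          }
        }
      }
    }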
2024-11-27T13:24:50,576 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:50,576 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713890542; duration=0sec 2024-11-27T13:24:50,576 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:50,576 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:50,576 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:50,577 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:50,577 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:50,578 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,578 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d066603af18f418b88d5b59e886ac746, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.7 K 2024-11-27T13:24:50,578 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d066603af18f418b88d5b59e886ac746, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713888173 2024-11-27T13:24:50,579 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33429a567095425287c4f02174623a72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732713888825 2024-11-27T13:24:50,579 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8eb10b288854406826688b5e1674d28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:50,598 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:50,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:50,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-27T13:24:50,599 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/4da083fdedd74589908807fde18e7bb4 is 50, key is test_row_0/C:col10/1732713890066/Put/seqid=0 2024-11-27T13:24:50,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:50,599 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:50,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:50,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742159_1335 (size=13085) 2024-11-27T13:24:50,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/845bdd99e2a641fda241f2a0dd653113 is 50, key is test_row_0/A:col10/1732713890085/Put/seqid=0 2024-11-27T13:24:50,616 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/4da083fdedd74589908807fde18e7bb4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4da083fdedd74589908807fde18e7bb4 2024-11-27T13:24:50,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742160_1336 (size=12301) 2024-11-27T13:24:50,622 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into 4da083fdedd74589908807fde18e7bb4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:50,623 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:50,623 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713890542; duration=0sec 2024-11-27T13:24:50,623 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:50,623 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:50,623 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/845bdd99e2a641fda241f2a0dd653113 2024-11-27T13:24:50,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b1d5dc0987db4411a3e975033a712f4f is 50, key is test_row_0/B:col10/1732713890085/Put/seqid=0 2024-11-27T13:24:50,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742161_1337 (size=12301) 2024-11-27T13:24:50,636 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b1d5dc0987db4411a3e975033a712f4f 2024-11-27T13:24:50,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/de5c78a6a2884108926dcd3ebfd8a576 is 50, key is test_row_0/C:col10/1732713890085/Put/seqid=0 2024-11-27T13:24:50,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742162_1338 (size=12301) 2024-11-27T13:24:50,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:50,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:50,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713950711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713950713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713950713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T13:24:50,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713950814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713950816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:50,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713950816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:50,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/908f605917374a5ab55acddfce590261 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/908f605917374a5ab55acddfce590261 2024-11-27T13:24:50,969 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 908f605917374a5ab55acddfce590261(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:50,969 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:50,969 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713890542; duration=0sec 2024-11-27T13:24:50,970 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:50,970 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:51,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713951016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713951019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713951019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,048 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/de5c78a6a2884108926dcd3ebfd8a576 2024-11-27T13:24:51,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/845bdd99e2a641fda241f2a0dd653113 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113 2024-11-27T13:24:51,058 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113, entries=150, sequenceid=354, filesize=12.0 K 2024-11-27T13:24:51,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b1d5dc0987db4411a3e975033a712f4f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f 2024-11-27T13:24:51,063 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f, entries=150, sequenceid=354, filesize=12.0 K 2024-11-27T13:24:51,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/de5c78a6a2884108926dcd3ebfd8a576 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576 2024-11-27T13:24:51,068 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576, entries=150, sequenceid=354, filesize=12.0 K 2024-11-27T13:24:51,069 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 160ba87e97489a540350dc572e5f397d in 470ms, sequenceid=354, compaction requested=false 2024-11-27T13:24:51,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:51,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
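The flush finishing above was executed by a FlushRegionProcedure (pid=92) dispatched from the master; the entries that follow show the client observing the table-level FLUSH operation (procId 91) complete and immediately requesting another one. From the client side this corresponds to a plain Admin.flush call; a minimal sketch, with connection setup assumed:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush of the table on the master and waits for it to finish,
          // which matches the "Operation: FLUSH ... completed" client entry below.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }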
2024-11-27T13:24:51,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-27T13:24:51,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-27T13:24:51,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-27T13:24:51,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 930 msec 2024-11-27T13:24:51,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 935 msec 2024-11-27T13:24:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-27T13:24:51,243 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-27T13:24:51,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-11-27T13:24:51,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:51,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:51,246 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-27T13:24:51,248 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, 
id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:51,248 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:51,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/84c69637a5514960b3298b6f5b3c47e2 is 50, key is test_row_0/A:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:51,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742163_1339 (size=12301) 2024-11-27T13:24:51,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/84c69637a5514960b3298b6f5b3c47e2 2024-11-27T13:24:51,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1553065b8c914183a95ca72f19d8852a is 50, key is test_row_0/B:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:51,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742164_1340 (size=12301) 2024-11-27T13:24:51,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713951299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713951318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713951322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713951322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-27T13:24:51,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:51,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:51,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:51,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
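The repeated RegionTooBusyException warnings above show writes being rejected while the region sits over its memstore limit (512.0 K) and a flush is still in flight. The standard HBase client normally retries such failures internally (and may surface them wrapped in a RetriesExhaustedException), so the explicit retry loop below is only an illustration; the row key, families and qualifier mirror the test keys in this log (test_row_0, A/B/C, col10), while the backoff policy is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its memstore limit; back off and let the flush catch up.
              if (attempt >= 5) {
                throw e;
              }
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }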
2024-11-27T13:24:51,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713951403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-27T13:24:51,553 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:51,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:51,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:51,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:51,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713951606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1553065b8c914183a95ca72f19d8852a 2024-11-27T13:24:51,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:51,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:51,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:51,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f9588a868f78420db4f74f024ba33dd6 is 50, key is test_row_0/C:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:51,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742165_1341 (size=12301) 2024-11-27T13:24:51,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713951823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713951824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713951825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-27T13:24:51,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:51,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:51,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:51,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:51,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:51,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:51,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:51,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713951909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,015 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f9588a868f78420db4f74f024ba33dd6 2024-11-27T13:24:52,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/84c69637a5514960b3298b6f5b3c47e2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2 2024-11-27T13:24:52,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T13:24:52,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/1553065b8c914183a95ca72f19d8852a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a 2024-11-27T13:24:52,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a, entries=150, 
sequenceid=373, filesize=12.0 K 2024-11-27T13:24:52,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/f9588a868f78420db4f74f024ba33dd6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6 2024-11-27T13:24:52,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6, entries=150, sequenceid=373, filesize=12.0 K 2024-11-27T13:24:52,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 160ba87e97489a540350dc572e5f397d in 900ms, sequenceid=373, compaction requested=true 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,146 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:52,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:52,146 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,148 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,148 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:52,148 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in 
TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,148 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/de244090cf804a37bf94b5abbf84c548, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.8 K 2024-11-27T13:24:52,148 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,148 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:52,148 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,149 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/908f605917374a5ab55acddfce590261, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.8 K 2024-11-27T13:24:52,149 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting de244090cf804a37bf94b5abbf84c548, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:52,149 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 845bdd99e2a641fda241f2a0dd653113, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732713890085 2024-11-27T13:24:52,149 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 908f605917374a5ab55acddfce590261, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:52,150 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c69637a5514960b3298b6f5b3c47e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 
2024-11-27T13:24:52,150 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b1d5dc0987db4411a3e975033a712f4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732713890085 2024-11-27T13:24:52,150 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1553065b8c914183a95ca72f19d8852a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 2024-11-27T13:24:52,157 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,158 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/7cf47f584e9f4b7bbae41776cff36895 is 50, key is test_row_0/A:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:52,158 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#293 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,158 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/ef8f208fc0f74798a2dcd1a2fe194083 is 50, key is test_row_0/B:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:52,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742167_1343 (size=13187) 2024-11-27T13:24:52,168 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742166_1342 (size=13187) 2024-11-27T13:24:52,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-27T13:24:52,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,169 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:52,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,176 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/7cf47f584e9f4b7bbae41776cff36895 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/7cf47f584e9f4b7bbae41776cff36895 2024-11-27T13:24:52,184 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 7cf47f584e9f4b7bbae41776cff36895(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:52,184 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,184 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713892146; duration=0sec 2024-11-27T13:24:52,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a2d6c1e21fd49a0a5540be4327610d1 is 50, key is test_row_0/A:col10/1732713891291/Put/seqid=0 2024-11-27T13:24:52,184 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:52,184 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:52,184 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,186 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,186 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:52,186 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,187 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4da083fdedd74589908807fde18e7bb4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.8 K 2024-11-27T13:24:52,187 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4da083fdedd74589908807fde18e7bb4, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732713889454 2024-11-27T13:24:52,188 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting de5c78a6a2884108926dcd3ebfd8a576, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732713890085 2024-11-27T13:24:52,189 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9588a868f78420db4f74f024ba33dd6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 2024-11-27T13:24:52,200 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#295 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,200 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/c6cdcb1808de4a318fb8a379e11e6e67 is 50, key is test_row_0/C:col10/1732713890712/Put/seqid=0 2024-11-27T13:24:52,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742169_1345 (size=13187) 2024-11-27T13:24:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742168_1344 (size=12301) 2024-11-27T13:24:52,211 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a2d6c1e21fd49a0a5540be4327610d1 2024-11-27T13:24:52,211 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/c6cdcb1808de4a318fb8a379e11e6e67 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c6cdcb1808de4a318fb8a379e11e6e67 2024-11-27T13:24:52,217 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into c6cdcb1808de4a318fb8a379e11e6e67(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:52,217 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,217 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713892146; duration=0sec 2024-11-27T13:24:52,217 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,217 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:52,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 is 50, key is test_row_0/B:col10/1732713891291/Put/seqid=0 2024-11-27T13:24:52,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742170_1346 (size=12301) 2024-11-27T13:24:52,226 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 2024-11-27T13:24:52,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/620753bf14b84e479c67a2a56f672db9 is 50, key is test_row_0/C:col10/1732713891291/Put/seqid=0 2024-11-27T13:24:52,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742171_1347 (size=12301) 2024-11-27T13:24:52,238 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/620753bf14b84e479c67a2a56f672db9 2024-11-27T13:24:52,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a2d6c1e21fd49a0a5540be4327610d1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1 2024-11-27T13:24:52,247 INFO 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1, entries=150, sequenceid=392, filesize=12.0 K 2024-11-27T13:24:52,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 2024-11-27T13:24:52,253 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6, entries=150, sequenceid=392, filesize=12.0 K 2024-11-27T13:24:52,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/620753bf14b84e479c67a2a56f672db9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9 2024-11-27T13:24:52,261 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9, entries=150, sequenceid=392, filesize=12.0 K 2024-11-27T13:24:52,262 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 160ba87e97489a540350dc572e5f397d in 93ms, sequenceid=392, compaction requested=false 2024-11-27T13:24:52,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-11-27T13:24:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-11-27T13:24:52,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-27T13:24:52,265 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0150 sec 2024-11-27T13:24:52,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.0210 sec 2024-11-27T13:24:52,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-27T13:24:52,351 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-27T13:24:52,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:52,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees 2024-11-27T13:24:52,354 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:52,354 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:52,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:52,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
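(Editor's note, not part of the captured log: the FLUSH operations recorded above — "Client=jenkins//172.17.0.2 flush TestAcidGuarantees", procId 93 completed, then pid=95 stored — correspond to an ordinary client-side Admin flush call. A minimal, hypothetical sketch of that call follows; the configuration source is an assumption, only the table name is taken from the log.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Connection settings come from hbase-site.xml on the classpath; nothing
    // here beyond the table name is taken from the test run above.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table. The master runs a
      // FlushTableProcedure with a FlushRegionProcedure per region, which is
      // the pid/ppid pairing visible in the ProcedureExecutor entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}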
2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:52,432 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e8c6705d18e54b4a8deb7708f85e6fc3 is 50, key is test_row_0/A:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742172_1348 (size=12301) 2024-11-27T13:24:52,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e8c6705d18e54b4a8deb7708f85e6fc3 2024-11-27T13:24:52,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/cb6c5a8971214d49b8abdc7e3ff33d55 is 50, key is test_row_0/B:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742173_1349 (size=12301) 2024-11-27T13:24:52,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:52,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/cb6c5a8971214d49b8abdc7e3ff33d55 2024-11-27T13:24:52,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/759cbb621cb34ea3a661aca01d01f051 is 50, key is test_row_0/C:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742174_1350 (size=12301) 2024-11-27T13:24:52,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713952495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-27T13:24:52,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:52,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
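(Editor's note, not part of the captured log: the RegionTooBusyException above, "Over memstore limit=512.0 K", is HRegion.checkResources rejecting writes while the region's memstore exceeds its blocking size, i.e. the per-region flush size multiplied by the block multiplier. A hedged sketch of the two settings involved follows; the concrete values are assumptions chosen only to reproduce a 512 K limit and are not read from this test.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  // Assumed example values: a 128 KB flush size with a block multiplier of 4
  // gives a 512 K blocking limit like the one reported in the exception above.
  static Configuration smallRegionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // While a region's memstore stays above flush.size * multiplier, puts are
    // rejected with RegionTooBusyException until the pending flush catches up.
    return conf;
  }
}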
2024-11-27T13:24:52,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,572 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/ef8f208fc0f74798a2dcd1a2fe194083 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/ef8f208fc0f74798a2dcd1a2fe194083 2024-11-27T13:24:52,576 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into ef8f208fc0f74798a2dcd1a2fe194083(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:52,576 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,576 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713892146; duration=0sec 2024-11-27T13:24:52,577 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,577 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:52,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713952599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:52,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-27T13:24:52,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:52,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:52,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713952802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,812 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-27T13:24:52,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:52,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] handler.RSProcedureHandler(58): pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=96 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=96 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:52,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713952827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713952833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713952834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:52,874 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/759cbb621cb34ea3a661aca01d01f051 2024-11-27T13:24:52,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/e8c6705d18e54b4a8deb7708f85e6fc3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3 2024-11-27T13:24:52,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3, entries=150, sequenceid=405, filesize=12.0 K 2024-11-27T13:24:52,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/cb6c5a8971214d49b8abdc7e3ff33d55 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55 2024-11-27T13:24:52,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55, entries=150, sequenceid=405, filesize=12.0 K 2024-11-27T13:24:52,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/759cbb621cb34ea3a661aca01d01f051 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051 2024-11-27T13:24:52,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051, entries=150, sequenceid=405, filesize=12.0 K 2024-11-27T13:24:52,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 160ba87e97489a540350dc572e5f397d in 462ms, sequenceid=405, compaction requested=true 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,893 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:52,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:52,893 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,894 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,894 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,894 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:52,894 DEBUG 
[RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:52,894 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,894 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:52,894 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/7cf47f584e9f4b7bbae41776cff36895, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.9 K 2024-11-27T13:24:52,894 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/ef8f208fc0f74798a2dcd1a2fe194083, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.9 K 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cf47f584e9f4b7bbae41776cff36895, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ef8f208fc0f74798a2dcd1a2fe194083, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5189c3ed3d9e48b6bb3f7eba0014e1c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732713891291 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a2d6c1e21fd49a0a5540be4327610d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732713891291 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
e8c6705d18e54b4a8deb7708f85e6fc3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:52,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cb6c5a8971214d49b8abdc7e3ff33d55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:52,903 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,903 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,904 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5b925011564c4a5aaccf7340b77280ed is 50, key is test_row_0/B:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,904 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/0c90acd503c847738f228281226269b1 is 50, key is test_row_0/A:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742176_1352 (size=13289) 2024-11-27T13:24:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742175_1351 (size=13289) 2024-11-27T13:24:52,918 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/0c90acd503c847738f228281226269b1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/0c90acd503c847738f228281226269b1 2024-11-27T13:24:52,918 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/5b925011564c4a5aaccf7340b77280ed as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5b925011564c4a5aaccf7340b77280ed 2024-11-27T13:24:52,923 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 0c90acd503c847738f228281226269b1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:52,923 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,923 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713892893; duration=0sec 2024-11-27T13:24:52,923 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:52,923 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:52,923 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:52,925 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into 5b925011564c4a5aaccf7340b77280ed(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:24:52,925 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,925 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713892893; duration=0sec 2024-11-27T13:24:52,925 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,925 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:52,925 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:52,925 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:52,925 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,925 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c6cdcb1808de4a318fb8a379e11e6e67, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=36.9 K 2024-11-27T13:24:52,926 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6cdcb1808de4a318fb8a379e11e6e67, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732713890709 2024-11-27T13:24:52,926 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 620753bf14b84e479c67a2a56f672db9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732713891291 2024-11-27T13:24:52,927 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 759cbb621cb34ea3a661aca01d01f051, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:52,955 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:52,955 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/7885c2dd28ca460085a6a1ee4caccacf is 50, key is test_row_0/C:col10/1732713892428/Put/seqid=0 2024-11-27T13:24:52,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:52,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742177_1353 (size=13289) 2024-11-27T13:24:52,964 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:52,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:52,965 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:52,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:52,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/f2a27bf298f44f27bd77d0cd3893ed1c is 50, key is test_row_0/A:col10/1732713892491/Put/seqid=0 2024-11-27T13:24:52,971 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/7885c2dd28ca460085a6a1ee4caccacf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7885c2dd28ca460085a6a1ee4caccacf 2024-11-27T13:24:52,977 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into 7885c2dd28ca460085a6a1ee4caccacf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:52,977 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:52,977 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713892893; duration=0sec 2024-11-27T13:24:52,977 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:52,977 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742178_1354 (size=12301) 2024-11-27T13:24:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:53,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:53,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:53,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713953122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:53,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713953224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:53,402 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/f2a27bf298f44f27bd77d0cd3893ed1c 2024-11-27T13:24:53,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2e698a004c3d40138673916aa33bef2e is 50, key is test_row_0/B:col10/1732713892491/Put/seqid=0 2024-11-27T13:24:53,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742179_1355 (size=12301) 2024-11-27T13:24:53,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713953430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:53,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713953735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:53,827 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2e698a004c3d40138673916aa33bef2e 2024-11-27T13:24:53,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/9f4dd60948bc410ab8e3b247dbfc1bfe is 50, key is test_row_0/C:col10/1732713892491/Put/seqid=0 2024-11-27T13:24:53,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742180_1356 (size=12301) 2024-11-27T13:24:53,845 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/9f4dd60948bc410ab8e3b247dbfc1bfe 2024-11-27T13:24:53,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/f2a27bf298f44f27bd77d0cd3893ed1c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c 2024-11-27T13:24:53,864 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c, entries=150, sequenceid=433, filesize=12.0 K 2024-11-27T13:24:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/2e698a004c3d40138673916aa33bef2e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e 2024-11-27T13:24:53,869 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e, entries=150, sequenceid=433, filesize=12.0 K 2024-11-27T13:24:53,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/9f4dd60948bc410ab8e3b247dbfc1bfe as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe 2024-11-27T13:24:53,875 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe, entries=150, sequenceid=433, filesize=12.0 K 2024-11-27T13:24:53,876 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 160ba87e97489a540350dc572e5f397d in 911ms, sequenceid=433, compaction requested=false 2024-11-27T13:24:53,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:53,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:53,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=96 2024-11-27T13:24:53,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=96 2024-11-27T13:24:53,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-27T13:24:53,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5230 sec 2024-11-27T13:24:53,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees in 1.5280 sec 2024-11-27T13:24:54,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:54,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:54,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:54,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a6002a4381b4dfb96fe2959ba490ac0 is 50, key is test_row_0/A:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:54,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742181_1357 (size=12301) 2024-11-27T13:24:54,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a6002a4381b4dfb96fe2959ba490ac0 2024-11-27T13:24:54,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c137977285a64d63a46480a050b66bfd is 50, key is test_row_0/B:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742182_1358 
(size=12301) 2024-11-27T13:24:54,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713954293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713954396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-27T13:24:54,459 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-27T13:24:54,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:24:54,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees 2024-11-27T13:24:54,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:54,462 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:24:54,462 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:24:54,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:24:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:54,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713954599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:54,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-27T13:24:54,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:54,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:54,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c137977285a64d63a46480a050b66bfd 2024-11-27T13:24:54,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/debfac17b1aa469ab8056a546e63e60e is 50, key is test_row_0/C:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:54,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742183_1359 (size=12301) 2024-11-27T13:24:54,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:54,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:54,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-27T13:24:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36522 deadline: 1732713954833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,835 DEBUG [Thread-1209 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4125 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., 
hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:54,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36574 deadline: 1732713954835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,837 DEBUG [Thread-1203 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4124 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:54,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36544 deadline: 1732713954838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,840 DEBUG [Thread-1205 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:24:54,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:24:54,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36558 deadline: 1732713954901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 2024-11-27T13:24:54,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:54,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-27T13:24:54,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:54,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:54,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:54,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:54,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:55,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:55,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-27T13:24:55,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:55,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:55,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:55,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:55,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:24:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:24:55,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/debfac17b1aa469ab8056a546e63e60e 2024-11-27T13:24:55,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/8a6002a4381b4dfb96fe2959ba490ac0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0 2024-11-27T13:24:55,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0, entries=150, sequenceid=446, filesize=12.0 K 2024-11-27T13:24:55,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/c137977285a64d63a46480a050b66bfd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd 2024-11-27T13:24:55,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd, entries=150, sequenceid=446, filesize=12.0 K 2024-11-27T13:24:55,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/debfac17b1aa469ab8056a546e63e60e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e 2024-11-27T13:24:55,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e, entries=150, sequenceid=446, filesize=12.0 K 2024-11-27T13:24:55,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 160ba87e97489a540350dc572e5f397d in 858ms, sequenceid=446, compaction requested=true 2024-11-27T13:24:55,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:55,099 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:55,099 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 160ba87e97489a540350dc572e5f397d:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:24:55,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:55,100 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:55,100 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:55,100 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/B is initiating minor compaction (all files) 2024-11-27T13:24:55,100 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/A is initiating minor compaction (all files) 2024-11-27T13:24:55,100 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/B in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:55,100 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/A in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:55,100 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/0c90acd503c847738f228281226269b1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=37.0 K 2024-11-27T13:24:55,100 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5b925011564c4a5aaccf7340b77280ed, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=37.0 K 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c90acd503c847738f228281226269b1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b925011564c4a5aaccf7340b77280ed, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2a27bf298f44f27bd77d0cd3893ed1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732713892489 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e698a004c3d40138673916aa33bef2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732713892489 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a6002a4381b4dfb96fe2959ba490ac0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713893110 2024-11-27T13:24:55,101 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c137977285a64d63a46480a050b66bfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713893110 2024-11-27T13:24:55,111 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#B#compaction#310 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:55,111 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/a0bc20f7bc7c4b7f96308942bb5bee90 is 50, key is test_row_0/B:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:55,118 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#A#compaction#311 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:55,118 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/04a2ff920b264aa4abd230938419f6c1 is 50, key is test_row_0/A:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:55,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742184_1360 (size=13391) 2024-11-27T13:24:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742185_1361 (size=13391) 2024-11-27T13:24:55,136 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/a0bc20f7bc7c4b7f96308942bb5bee90 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a0bc20f7bc7c4b7f96308942bb5bee90 2024-11-27T13:24:55,139 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/04a2ff920b264aa4abd230938419f6c1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/04a2ff920b264aa4abd230938419f6c1 2024-11-27T13:24:55,145 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/A of 160ba87e97489a540350dc572e5f397d into 04a2ff920b264aa4abd230938419f6c1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:55,145 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:55,145 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/A, priority=13, startTime=1732713895098; duration=0sec 2024-11-27T13:24:55,145 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:24:55,146 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:A 2024-11-27T13:24:55,146 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:24:55,147 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:24:55,147 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): 160ba87e97489a540350dc572e5f397d/C is initiating minor compaction (all files) 2024-11-27T13:24:55,147 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 160ba87e97489a540350dc572e5f397d/C in TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:55,147 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7885c2dd28ca460085a6a1ee4caccacf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp, totalSize=37.0 K 2024-11-27T13:24:55,148 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7885c2dd28ca460085a6a1ee4caccacf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732713892426 2024-11-27T13:24:55,148 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f4dd60948bc410ab8e3b247dbfc1bfe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732713892489 2024-11-27T13:24:55,149 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/B of 160ba87e97489a540350dc572e5f397d into a0bc20f7bc7c4b7f96308942bb5bee90(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:55,149 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:55,149 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting debfac17b1aa469ab8056a546e63e60e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1732713893110 2024-11-27T13:24:55,149 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/B, priority=13, startTime=1732713895099; duration=0sec 2024-11-27T13:24:55,149 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:55,149 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:B 2024-11-27T13:24:55,158 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 160ba87e97489a540350dc572e5f397d#C#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:24:55,159 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/62ac023806fd403584ae0f8c9c207159 is 50, key is test_row_0/C:col10/1732713894238/Put/seqid=0 2024-11-27T13:24:55,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742186_1362 (size=13391) 2024-11-27T13:24:55,179 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/62ac023806fd403584ae0f8c9c207159 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/62ac023806fd403584ae0f8c9c207159 2024-11-27T13:24:55,187 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 160ba87e97489a540350dc572e5f397d/C of 160ba87e97489a540350dc572e5f397d into 62ac023806fd403584ae0f8c9c207159(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:24:55,187 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:55,187 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d., storeName=160ba87e97489a540350dc572e5f397d/C, priority=13, startTime=1732713895099; duration=0sec 2024-11-27T13:24:55,187 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:24:55,187 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 160ba87e97489a540350dc572e5f397d:C 2024-11-27T13:24:55,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:55,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-27T13:24:55,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:55,227 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:24:55,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:55,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:55,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c29a85f6219e4c1ea1c7918241765d46 is 50, key is test_row_0/A:col10/1732713894292/Put/seqid=0 2024-11-27T13:24:55,237 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742187_1363 (size=12301) 2024-11-27T13:24:55,277 DEBUG [Thread-1220 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:59011 2024-11-27T13:24:55,277 DEBUG [Thread-1220 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,277 DEBUG [Thread-1218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:59011 2024-11-27T13:24:55,277 DEBUG [Thread-1218 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,277 DEBUG [Thread-1216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:59011 2024-11-27T13:24:55,278 DEBUG [Thread-1216 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,278 DEBUG [Thread-1214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:59011 2024-11-27T13:24:55,278 DEBUG [Thread-1214 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,278 DEBUG [Thread-1222 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:59011 2024-11-27T13:24:55,278 DEBUG [Thread-1222 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. as already flushing 2024-11-27T13:24:55,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:55,406 DEBUG [Thread-1211 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:59011 2024-11-27T13:24:55,406 DEBUG [Thread-1211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:55,637 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c29a85f6219e4c1ea1c7918241765d46 2024-11-27T13:24:55,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b5a18cb2705b433dab09d3fd2eab9986 is 50, key is test_row_0/B:col10/1732713894292/Put/seqid=0 2024-11-27T13:24:55,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742188_1364 (size=12301) 2024-11-27T13:24:56,048 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b5a18cb2705b433dab09d3fd2eab9986 2024-11-27T13:24:56,054 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/c0da776cf90748908300d3d562f6eceb is 50, key is test_row_0/C:col10/1732713894292/Put/seqid=0 2024-11-27T13:24:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742189_1365 (size=12301) 2024-11-27T13:24:56,458 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=473 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/c0da776cf90748908300d3d562f6eceb 2024-11-27T13:24:56,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c29a85f6219e4c1ea1c7918241765d46 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c29a85f6219e4c1ea1c7918241765d46 2024-11-27T13:24:56,465 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c29a85f6219e4c1ea1c7918241765d46, entries=150, sequenceid=473, filesize=12.0 K 2024-11-27T13:24:56,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/b5a18cb2705b433dab09d3fd2eab9986 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b5a18cb2705b433dab09d3fd2eab9986 2024-11-27T13:24:56,469 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b5a18cb2705b433dab09d3fd2eab9986, entries=150, sequenceid=473, filesize=12.0 K 2024-11-27T13:24:56,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/c0da776cf90748908300d3d562f6eceb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c0da776cf90748908300d3d562f6eceb 2024-11-27T13:24:56,472 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c0da776cf90748908300d3d562f6eceb, entries=150, sequenceid=473, filesize=12.0 K 2024-11-27T13:24:56,473 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for 160ba87e97489a540350dc572e5f397d in 1246ms, sequenceid=473, compaction requested=false 2024-11-27T13:24:56,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2538): Flush status journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:24:56,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:56,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=98 2024-11-27T13:24:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=98 2024-11-27T13:24:56,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-27T13:24:56,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0120 sec 2024-11-27T13:24:56,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees in 2.0150 sec 2024-11-27T13:24:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-27T13:24:56,566 INFO [Thread-1213 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-27T13:24:57,962 DEBUG [Thread-1207 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:59011 2024-11-27T13:24:57,962 DEBUG [Thread-1207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:58,841 DEBUG [Thread-1203 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x669e1999 to 127.0.0.1:59011 2024-11-27T13:24:58,841 DEBUG [Thread-1203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:58,849 DEBUG [Thread-1205 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72aa9ee5 to 127.0.0.1:59011 2024-11-27T13:24:58,849 DEBUG [Thread-1205 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:58,881 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:59011 2024-11-27T13:24:58,881 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 137 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6944 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7051 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6800 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6981 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7002 2024-11-27T13:24:58,881 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:24:58,881 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:24:58,881 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b6adc5 to 127.0.0.1:59011 2024-11-27T13:24:58,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:24:58,882 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:24:58,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:24:58,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:24:58,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:24:58,885 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713898885"}]},"ts":"1732713898885"} 2024-11-27T13:24:58,886 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:24:58,888 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:24:58,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:24:58,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, UNASSIGN}] 2024-11-27T13:24:58,890 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, UNASSIGN 2024-11-27T13:24:58,890 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=101 updating hbase:meta row=160ba87e97489a540350dc572e5f397d, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:24:58,891 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:24:58,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:24:58,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:24:59,042 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:24:59,043 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 160ba87e97489a540350dc572e5f397d 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 160ba87e97489a540350dc572e5f397d, disabling compactions & flushes 2024-11-27T13:24:59,043 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. after waiting 0 ms 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 
2024-11-27T13:24:59,043 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(2837): Flushing 160ba87e97489a540350dc572e5f397d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=A 2024-11-27T13:24:59,043 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:59,044 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=B 2024-11-27T13:24:59,044 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:59,044 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 160ba87e97489a540350dc572e5f397d, store=C 2024-11-27T13:24:59,044 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:24:59,047 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c78a1f2932674877ae7d7661dfe09782 is 50, key is test_row_0/A:col10/1732713897961/Put/seqid=0 2024-11-27T13:24:59,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742190_1366 (size=12301) 2024-11-27T13:24:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:24:59,452 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c78a1f2932674877ae7d7661dfe09782 2024-11-27T13:24:59,457 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/0b1176ea9eb44d0cbc94e527fbd85f2d is 50, key is test_row_0/B:col10/1732713897961/Put/seqid=0 2024-11-27T13:24:59,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742191_1367 (size=12301) 2024-11-27T13:24:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:24:59,861 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 
{event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/0b1176ea9eb44d0cbc94e527fbd85f2d 2024-11-27T13:24:59,867 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/bf577a018f384880be68e62f61814267 is 50, key is test_row_0/C:col10/1732713897961/Put/seqid=0 2024-11-27T13:24:59,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742192_1368 (size=12301) 2024-11-27T13:24:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:25:00,271 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=481 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/bf577a018f384880be68e62f61814267 2024-11-27T13:25:00,275 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/A/c78a1f2932674877ae7d7661dfe09782 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c78a1f2932674877ae7d7661dfe09782 2024-11-27T13:25:00,278 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c78a1f2932674877ae7d7661dfe09782, entries=150, sequenceid=481, filesize=12.0 K 2024-11-27T13:25:00,279 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/B/0b1176ea9eb44d0cbc94e527fbd85f2d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0b1176ea9eb44d0cbc94e527fbd85f2d 2024-11-27T13:25:00,282 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0b1176ea9eb44d0cbc94e527fbd85f2d, entries=150, sequenceid=481, filesize=12.0 K 2024-11-27T13:25:00,283 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/.tmp/C/bf577a018f384880be68e62f61814267 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/bf577a018f384880be68e62f61814267 2024-11-27T13:25:00,286 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/bf577a018f384880be68e62f61814267, entries=150, sequenceid=481, filesize=12.0 K 2024-11-27T13:25:00,287 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 160ba87e97489a540350dc572e5f397d in 1243ms, sequenceid=481, compaction requested=true 2024-11-27T13:25:00,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8ff886562e994e70b39744e37064932b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8bb12e4401cf49269901a05bd03dfcab, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9b480c202b5f44f9b0a6e7e43b589a90, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/dfb4264917f543309e794383b10d2a13, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/3b72756b92da48fca3e50720bbaa2b85, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/4c46d799428341dd87b7d7a056690269, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/de244090cf804a37bf94b5abbf84c548, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/7cf47f584e9f4b7bbae41776cff36895, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/0c90acd503c847738f228281226269b1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0] to archive 2024-11-27T13:25:00,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:00,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/ff8ee2d22c51461b826a2a6a5eb5e815 2024-11-27T13:25:00,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/24b37896175a4a5694786c894fb4f2df 2024-11-27T13:25:00,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8ff886562e994e70b39744e37064932b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8ff886562e994e70b39744e37064932b 2024-11-27T13:25:00,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c4d2b77dbf1645ceb7c1991070ccda33 2024-11-27T13:25:00,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c7331aefbebb473f94fdc8b8a62cd7b9 2024-11-27T13:25:00,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e4adb651eb2d45b6831cf4506e9a9d8d 2024-11-27T13:25:00,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8bb12e4401cf49269901a05bd03dfcab to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8bb12e4401cf49269901a05bd03dfcab 2024-11-27T13:25:00,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b3a3293d7be84037820337117ad38227 2024-11-27T13:25:00,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/a6b0070f57824ecc8949d63ce1511135 2024-11-27T13:25:00,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9b480c202b5f44f9b0a6e7e43b589a90 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9b480c202b5f44f9b0a6e7e43b589a90 2024-11-27T13:25:00,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/b35d9d02dcd64e978cbf34d470321afd 2024-11-27T13:25:00,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9cd1805094b34a81a38f657c74c1e4a7 2024-11-27T13:25:00,299 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/dfb4264917f543309e794383b10d2a13 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/dfb4264917f543309e794383b10d2a13 2024-11-27T13:25:00,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/9290e02f13ed40b59fb21ee7859b1498 2024-11-27T13:25:00,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/697e11f83acd46d8a9a32091abd7facd 2024-11-27T13:25:00,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/af5088c90a5a4b2b844790c77ba53494 2024-11-27T13:25:00,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/3b72756b92da48fca3e50720bbaa2b85 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/3b72756b92da48fca3e50720bbaa2b85 2024-11-27T13:25:00,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/49397b4d85ef4c8db88e4ebd36612ef4 2024-11-27T13:25:00,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/cdc25422034f40a1876e4f7d06318932 2024-11-27T13:25:00,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/4c46d799428341dd87b7d7a056690269 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/4c46d799428341dd87b7d7a056690269 2024-11-27T13:25:00,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/da3f89ef17894ef18ad98f5bf3e615ba 2024-11-27T13:25:00,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/07327cbe06bc4020994dbf078e554333 2024-11-27T13:25:00,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/de244090cf804a37bf94b5abbf84c548 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/de244090cf804a37bf94b5abbf84c548 2024-11-27T13:25:00,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/727eab659bc049b6afb20c5be1dd2e0a 2024-11-27T13:25:00,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/845bdd99e2a641fda241f2a0dd653113 2024-11-27T13:25:00,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/7cf47f584e9f4b7bbae41776cff36895 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/7cf47f584e9f4b7bbae41776cff36895 2024-11-27T13:25:00,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/84c69637a5514960b3298b6f5b3c47e2 2024-11-27T13:25:00,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a2d6c1e21fd49a0a5540be4327610d1 2024-11-27T13:25:00,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/0c90acd503c847738f228281226269b1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/0c90acd503c847738f228281226269b1 2024-11-27T13:25:00,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/e8c6705d18e54b4a8deb7708f85e6fc3 2024-11-27T13:25:00,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/f2a27bf298f44f27bd77d0cd3893ed1c 2024-11-27T13:25:00,316 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/8a6002a4381b4dfb96fe2959ba490ac0 2024-11-27T13:25:00,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c31f30786d764146a7657f6c72b54f7f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0faaa22b93764825896ef4f06048138c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/174bdf937b70414fa5d63411e468dae5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/41786ab74029462980ed1dfb9a0376e0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/40c15f92592c413cbe0839c974511083, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/e40057f034994b1ea571aea8391ed0fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/908f605917374a5ab55acddfce590261, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/ef8f208fc0f74798a2dcd1a2fe194083, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5b925011564c4a5aaccf7340b77280ed, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd] to archive 2024-11-27T13:25:00,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T13:25:00,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/8dd0e46b42a149049f8670c7cb317eef 2024-11-27T13:25:00,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c6617569dd4d49ca919d876c31b7ed8f 2024-11-27T13:25:00,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c31f30786d764146a7657f6c72b54f7f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c31f30786d764146a7657f6c72b54f7f 2024-11-27T13:25:00,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/975121e1c08e4ae4833d424c6464cda6 2024-11-27T13:25:00,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/699d19a808a24c5696e98cbfae8b248d 2024-11-27T13:25:00,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/190590559e514ed7b488f02a1f544fb3 2024-11-27T13:25:00,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0faaa22b93764825896ef4f06048138c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0faaa22b93764825896ef4f06048138c 2024-11-27T13:25:00,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/3efa0ec56ea8459384dcb65dedd49c0b 2024-11-27T13:25:00,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1e702d6b9f92498499f0750cd4cabcce 2024-11-27T13:25:00,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/174bdf937b70414fa5d63411e468dae5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/174bdf937b70414fa5d63411e468dae5 2024-11-27T13:25:00,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/20d5661711354b4ab7cb587d186989db 2024-11-27T13:25:00,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5aeb276d7a284d628363e874f73b55f2 2024-11-27T13:25:00,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/41786ab74029462980ed1dfb9a0376e0 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/41786ab74029462980ed1dfb9a0376e0 2024-11-27T13:25:00,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c0b3303b2627445aa4a40be0eafd25a3 2024-11-27T13:25:00,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/71eaac90b06d4d8d968b3d115f99a424 2024-11-27T13:25:00,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/f80c1e82fecc4f71be58b39f85e2d964 2024-11-27T13:25:00,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/40c15f92592c413cbe0839c974511083 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/40c15f92592c413cbe0839c974511083 2024-11-27T13:25:00,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/bcfb61ac94db4f03b4d670944d240268 2024-11-27T13:25:00,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/448e8486c5734fc98e4176c26c0c57fd 2024-11-27T13:25:00,337 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/e40057f034994b1ea571aea8391ed0fb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/e40057f034994b1ea571aea8391ed0fb 2024-11-27T13:25:00,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2ff9def8aebc48548184c401afb7be8e 2024-11-27T13:25:00,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/86bdd34e0447401ab71d8a8fb86e9d98 2024-11-27T13:25:00,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/908f605917374a5ab55acddfce590261 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/908f605917374a5ab55acddfce590261 2024-11-27T13:25:00,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a21afcf71ebd4c85aba761ef1ca1525c 2024-11-27T13:25:00,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b1d5dc0987db4411a3e975033a712f4f 2024-11-27T13:25:00,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/ef8f208fc0f74798a2dcd1a2fe194083 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/ef8f208fc0f74798a2dcd1a2fe194083 2024-11-27T13:25:00,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/1553065b8c914183a95ca72f19d8852a 2024-11-27T13:25:00,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5189c3ed3d9e48b6bb3f7eba0014e1c6 2024-11-27T13:25:00,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5b925011564c4a5aaccf7340b77280ed to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/5b925011564c4a5aaccf7340b77280ed 2024-11-27T13:25:00,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/cb6c5a8971214d49b8abdc7e3ff33d55 2024-11-27T13:25:00,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/2e698a004c3d40138673916aa33bef2e 2024-11-27T13:25:00,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/c137977285a64d63a46480a050b66bfd 2024-11-27T13:25:00,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/ac2a0acc4fb948129477fdff80231731, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/78f5f377f2484072ae31403d2a93ccbb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/b81ee4b872974daebc5b8b55f4324464, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/38b9fe95c3df44f88f8be2a3448f2a79, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f983b429edce4dd2a26ba74c9b040e83, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d066603af18f418b88d5b59e886ac746, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4da083fdedd74589908807fde18e7bb4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c6cdcb1808de4a318fb8a379e11e6e67, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7885c2dd28ca460085a6a1ee4caccacf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e] to archive 2024-11-27T13:25:00,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
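Note: once families A, B and C have been archived as above, the originals no longer live under the data/ directory. If one wanted to confirm where the files ended up, a listing of the archived family directory with the Hadoop FileSystem API would look roughly like the sketch below (paths copied from the log; this check is illustrative and not part of the test itself).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Archive directory for column family C of the region closed in the log above.
    Path archivedCf = new Path("hdfs://localhost:42217/user/jenkins/test-data/"
        + "f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/"
        + "160ba87e97489a540350dc572e5f397d/C");
    FileSystem fs = archivedCf.getFileSystem(conf);
    // Print each archived HFile name and its size.
    for (FileStatus status : fs.listStatus(archivedCf)) {
      System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
    }
  }
}
```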
2024-11-27T13:25:00,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4c9d4840f89f48a5b5c6bc47f7fffcf1 2024-11-27T13:25:00,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d2cc3a0ba1de421c9d4b4b74a56a5f2b 2024-11-27T13:25:00,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/ac2a0acc4fb948129477fdff80231731 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/ac2a0acc4fb948129477fdff80231731 2024-11-27T13:25:00,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/daece1105b78409c90dbe82ab635acc5 2024-11-27T13:25:00,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/124a90119a234d83b958464da7af0d01 2024-11-27T13:25:00,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5f238d5112854c7388e555a619f5c6be 2024-11-27T13:25:00,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/78f5f377f2484072ae31403d2a93ccbb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/78f5f377f2484072ae31403d2a93ccbb 2024-11-27T13:25:00,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d977de62d04d4c5a8a401496d98bf314 2024-11-27T13:25:00,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7f62c10de9484ef684dedd67f04a7b5a 2024-11-27T13:25:00,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/b81ee4b872974daebc5b8b55f4324464 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/b81ee4b872974daebc5b8b55f4324464 2024-11-27T13:25:00,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5680499b10d341b494d8089af8b46da2 2024-11-27T13:25:00,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/e6c4af664e50431880abeca575377935 2024-11-27T13:25:00,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/38b9fe95c3df44f88f8be2a3448f2a79 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/38b9fe95c3df44f88f8be2a3448f2a79 2024-11-27T13:25:00,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/6b791bea7a624570ba177d1c45d4dd29 2024-11-27T13:25:00,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/5bcefc44353944d98b185493cbafb9a2 2024-11-27T13:25:00,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/09b211349d4243cfb4ef1363f202cf21 2024-11-27T13:25:00,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f983b429edce4dd2a26ba74c9b040e83 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f983b429edce4dd2a26ba74c9b040e83 2024-11-27T13:25:00,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f2ba4ee9b5894bd09cf8cca9a4a7cb2b 2024-11-27T13:25:00,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/46db4dfaeeea4f258b080961cdb1ac6d 2024-11-27T13:25:00,369 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d066603af18f418b88d5b59e886ac746 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/d066603af18f418b88d5b59e886ac746 2024-11-27T13:25:00,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/56b95de4f1bc446b99099a67995a4d1b 2024-11-27T13:25:00,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/33429a567095425287c4f02174623a72 2024-11-27T13:25:00,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4da083fdedd74589908807fde18e7bb4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/4da083fdedd74589908807fde18e7bb4 2024-11-27T13:25:00,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f8eb10b288854406826688b5e1674d28 2024-11-27T13:25:00,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/de5c78a6a2884108926dcd3ebfd8a576 2024-11-27T13:25:00,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c6cdcb1808de4a318fb8a379e11e6e67 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c6cdcb1808de4a318fb8a379e11e6e67 2024-11-27T13:25:00,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/f9588a868f78420db4f74f024ba33dd6 2024-11-27T13:25:00,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/620753bf14b84e479c67a2a56f672db9 2024-11-27T13:25:00,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7885c2dd28ca460085a6a1ee4caccacf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/7885c2dd28ca460085a6a1ee4caccacf 2024-11-27T13:25:00,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/759cbb621cb34ea3a661aca01d01f051 2024-11-27T13:25:00,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/9f4dd60948bc410ab8e3b247dbfc1bfe 2024-11-27T13:25:00,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/debfac17b1aa469ab8056a546e63e60e 2024-11-27T13:25:00,383 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/recovered.edits/484.seqid, newMaxSeqId=484, maxSeqId=1 2024-11-27T13:25:00,384 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d. 2024-11-27T13:25:00,384 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 160ba87e97489a540350dc572e5f397d: 2024-11-27T13:25:00,385 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 160ba87e97489a540350dc572e5f397d 2024-11-27T13:25:00,385 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=160ba87e97489a540350dc572e5f397d, regionState=CLOSED 2024-11-27T13:25:00,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-27T13:25:00,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 160ba87e97489a540350dc572e5f397d, server=a0541979a851,32819,1732713812705 in 1.4950 sec 2024-11-27T13:25:00,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-27T13:25:00,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=160ba87e97489a540350dc572e5f397d, UNASSIGN in 1.4980 sec 2024-11-27T13:25:00,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-27T13:25:00,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5010 sec 2024-11-27T13:25:00,391 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713900391"}]},"ts":"1732713900391"} 2024-11-27T13:25:00,392 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:25:00,394 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:25:00,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5120 sec 2024-11-27T13:25:00,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-27T13:25:00,988 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-11-27T13:25:00,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:25:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:00,990 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=103, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-11-27T13:25:00,991 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=103, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:00,993 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d 2024-11-27T13:25:00,994 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/recovered.edits] 2024-11-27T13:25:00,997 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/04a2ff920b264aa4abd230938419f6c1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/04a2ff920b264aa4abd230938419f6c1 2024-11-27T13:25:00,998 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c29a85f6219e4c1ea1c7918241765d46 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c29a85f6219e4c1ea1c7918241765d46 2024-11-27T13:25:00,999 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c78a1f2932674877ae7d7661dfe09782 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/A/c78a1f2932674877ae7d7661dfe09782 2024-11-27T13:25:01,000 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0b1176ea9eb44d0cbc94e527fbd85f2d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/0b1176ea9eb44d0cbc94e527fbd85f2d 2024-11-27T13:25:01,001 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a0bc20f7bc7c4b7f96308942bb5bee90 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/a0bc20f7bc7c4b7f96308942bb5bee90 2024-11-27T13:25:01,002 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b5a18cb2705b433dab09d3fd2eab9986 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/B/b5a18cb2705b433dab09d3fd2eab9986 2024-11-27T13:25:01,004 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/62ac023806fd403584ae0f8c9c207159 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/62ac023806fd403584ae0f8c9c207159 2024-11-27T13:25:01,005 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/bf577a018f384880be68e62f61814267 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/bf577a018f384880be68e62f61814267 2024-11-27T13:25:01,006 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c0da776cf90748908300d3d562f6eceb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/C/c0da776cf90748908300d3d562f6eceb 2024-11-27T13:25:01,008 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/recovered.edits/484.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d/recovered.edits/484.seqid 2024-11-27T13:25:01,008 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/160ba87e97489a540350dc572e5f397d 2024-11-27T13:25:01,009 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:25:01,010 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=103, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:01,014 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:25:01,016 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T13:25:01,017 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=103, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:01,017 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T13:25:01,017 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713901017"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:01,018 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:25:01,018 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 160ba87e97489a540350dc572e5f397d, NAME => 'TestAcidGuarantees,,1732713873121.160ba87e97489a540350dc572e5f397d.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:25:01,019 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T13:25:01,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713901019"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:01,020 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:25:01,022 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=103, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:01,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-11-27T13:25:01,052 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
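Note: the DisableTableProcedure (pid=99) and DeleteTableProcedure (pid=103) recorded above are the server-side counterparts of the ordinary client call sequence: disable the table, then delete it. A minimal sketch with the HBase Admin API is shown below; connection configuration is assumed and not taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table); // drives a DisableTableProcedure like pid=99 above
        admin.deleteTable(table);  // drives a DeleteTableProcedure like pid=103 above
      }
    }
  }
}
```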
2024-11-27T13:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-11-27T13:25:01,092 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 103 completed 2024-11-27T13:25:01,101 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 242), OpenFileDescriptor=451 (was 465), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=471 (was 478), ProcessCount=11 (was 11), AvailableMemoryMB=4140 (was 4200) 2024-11-27T13:25:01,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=471, ProcessCount=11, AvailableMemoryMB=4139 2024-11-27T13:25:01,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T13:25:01,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:25:01,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:01,113 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:25:01,113 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:01,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 104 2024-11-27T13:25:01,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:01,114 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:25:01,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742193_1369 (size=963) 2024-11-27T13:25:01,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:01,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:01,521 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:25:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742194_1370 (size=53) 2024-11-27T13:25:01,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d2d1919593dfb083385f344db3904c47, disabling compactions & flushes 2024-11-27T13:25:01,927 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
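The create request logged above carries the full table descriptor: an ADAPTIVE in-memory compaction policy as a table attribute and three column families A, B and C with a single version each. A sketch of how an equivalent descriptor can be assembled with the HBase 2.x client API, assuming an Admin handle named admin; the attribute key and family settings are copied from the logged descriptor, everything else is left at defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level attribute shown in the log: adaptive in-memory compaction
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)          // VERSIONS => '1' in the logged descriptor
          .build());                  // remaining family settings left at their defaults
    }
    TableDescriptor desc = builder.build();
    admin.createTable(desc);          // stored as the CreateTableProcedure (pid=104) above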
2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. after waiting 0 ms 2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:01,927 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:01,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:01,928 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:25:01,929 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713901928"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713901928"}]},"ts":"1732713901928"} 2024-11-27T13:25:01,930 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:25:01,930 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:25:01,931 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713901930"}]},"ts":"1732713901930"} 2024-11-27T13:25:01,931 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:25:01,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, ASSIGN}] 2024-11-27T13:25:01,936 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, ASSIGN 2024-11-27T13:25:01,936 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:25:02,087 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:02,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; OpenRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:02,217 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:02,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:02,242 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:02,242 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7285): Opening region: {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:25:02,243 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,243 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:02,243 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7327): checking encryption for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,243 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7330): checking classloading for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,244 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,245 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:02,245 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName A 2024-11-27T13:25:02,245 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:02,246 INFO 
[StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:02,246 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,247 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:02,247 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName B 2024-11-27T13:25:02,247 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:02,247 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:02,247 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,248 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:02,248 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName C 2024-11-27T13:25:02,248 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:02,249 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:02,249 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:02,249 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,250 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,251 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
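The FlushLargeStoresPolicy record just above notes that no per-table hbase.hregion.percolumnfamilyflush.size.lower.bound is set in the descriptor, so the lower bound falls back to the region memstore flush heap size divided by the number of families. If a fixed per-family bound were wanted, it could be set as a table attribute; a hedged sketch, where the property key is taken from the log line but the 16 MB value and the admin handle are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    TableName tn = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(tn);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        // explicit per-family flush lower bound instead of the computed fallback
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16 * 1024 * 1024))
        .build();
    admin.modifyTable(updated);   // table modifications reopen the region, as later records show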
2024-11-27T13:25:02,251 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1085): writing seq id for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:02,253 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:25:02,253 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1102): Opened d2d1919593dfb083385f344db3904c47; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59442130, jitterRate=-0.11424323916435242}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:25:02,254 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1001): Region open journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:02,254 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., pid=106, masterSystemTime=1732713902239 2024-11-27T13:25:02,255 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:02,256 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:02,256 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:02,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-11-27T13:25:02,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; OpenRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 in 169 msec 2024-11-27T13:25:02,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-27T13:25:02,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, ASSIGN in 323 msec 2024-11-27T13:25:02,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:25:02,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713902259"}]},"ts":"1732713902259"} 2024-11-27T13:25:02,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:25:02,263 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:25:02,263 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1520 sec 2024-11-27T13:25:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-27T13:25:03,218 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-27T13:25:03,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5fe71801 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@bf5e2f0 2024-11-27T13:25:03,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b82ba2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:03,224 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:03,225 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:03,226 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:25:03,227 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33444, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:25:03,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T13:25:03,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:25:03,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:03,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742195_1371 (size=999) 2024-11-27T13:25:03,639 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-27T13:25:03,639 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-27T13:25:03,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:25:03,642 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, REOPEN/MOVE}] 2024-11-27T13:25:03,643 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, REOPEN/MOVE 2024-11-27T13:25:03,643 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:03,644 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:25:03,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=110, ppid=109, state=RUNNABLE; CloseRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:03,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:03,796 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(124): Close d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:03,796 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:25:03,796 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1681): Closing d2d1919593dfb083385f344db3904c47, disabling compactions & flushes 2024-11-27T13:25:03,796 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:03,796 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:03,796 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. after waiting 0 ms 2024-11-27T13:25:03,796 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
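The modify request above (pid=107) turns family A into a MOB family with MOB_THRESHOLD => '4' while B and C are unchanged, which is what forces the REOPEN/MOVE of the region in the surrounding records. A sketch of the equivalent client call, assuming an Admin handle named admin; the MOB settings are the ones in the logged target descriptor:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableName tn = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(tn);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)     // IS_MOB => 'true' in the logged descriptor
        .setMobThreshold(4L)     // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
        .build();
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    admin.modifyTable(updated);  // drives the ModifyTableProcedure and ReopenTableRegionsProcedure above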
2024-11-27T13:25:03,800 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T13:25:03,800 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:03,800 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1635): Region close journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:03,800 WARN [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegionServer(3786): Not adding moved region record: d2d1919593dfb083385f344db3904c47 to self. 2024-11-27T13:25:03,802 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(170): Closed d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:03,802 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=CLOSED 2024-11-27T13:25:03,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=110, resume processing ppid=109 2024-11-27T13:25:03,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, ppid=109, state=SUCCESS; CloseRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 in 159 msec 2024-11-27T13:25:03,804 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, REOPEN/MOVE; state=CLOSED, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=true 2024-11-27T13:25:03,954 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:03,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE; OpenRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:04,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,110 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:04,110 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7285): Opening region: {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:25:04,111 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,111 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:04,111 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7327): checking encryption for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,111 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7330): checking classloading for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,112 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,113 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:04,113 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName A 2024-11-27T13:25:04,114 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:04,115 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:04,115 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,116 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:04,116 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName B 2024-11-27T13:25:04,116 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:04,116 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:04,116 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,117 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:04,117 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d1919593dfb083385f344db3904c47 columnFamilyName C 2024-11-27T13:25:04,117 DEBUG [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:04,117 INFO [StoreOpener-d2d1919593dfb083385f344db3904c47-1 {}] regionserver.HStore(327): Store=d2d1919593dfb083385f344db3904c47/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:04,117 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,118 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,119 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,120 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:25:04,121 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1085): writing seq id for d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,121 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1102): Opened d2d1919593dfb083385f344db3904c47; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66256166, jitterRate=-0.012706190347671509}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:25:04,122 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1001): Region open journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:04,123 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., pid=111, masterSystemTime=1732713904107 2024-11-27T13:25:04,124 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,124 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
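After the reopen completes, the records that follow show the test opening a batch of client connections (one ReadOnlyZKClient plus RPC client each) and then requesting a table flush while puts are in flight, which is what later trips the 512 KB memstore limit and produces the RegionTooBusyException traces further down. A minimal sketch of that client setup, one put and the flush, assuming the cluster Configuration is named conf; the row key, family and qualifier are illustrative, only the table name comes from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);  // one ZK session + RPC client, as logged
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
      table.put(put);               // may be rejected with RegionTooBusyException under heavy write load
      admin.flush(tn);              // triggers the FlushTableProcedure seen further down (pid=112)
    }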
2024-11-27T13:25:04,124 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=OPEN, openSeqNum=5, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=109 2024-11-27T13:25:04,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=109, state=SUCCESS; OpenRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 in 169 msec 2024-11-27T13:25:04,127 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-27T13:25:04,127 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, REOPEN/MOVE in 484 msec 2024-11-27T13:25:04,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-11-27T13:25:04,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-27T13:25:04,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 900 msec 2024-11-27T13:25:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-11-27T13:25:04,132 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-11-27T13:25:04,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-11-27T13:25:04,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,142 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-11-27T13:25:04,145 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,146 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x6cd96549 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-11-27T13:25:04,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,153 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-11-27T13:25:04,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,157 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-11-27T13:25:04,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-11-27T13:25:04,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,166 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-11-27T13:25:04,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,172 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-11-27T13:25:04,176 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,176 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-11-27T13:25:04,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:04,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-27T13:25:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:04,191 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:04,191 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:04,191 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:04,192 DEBUG [hconnection-0x7582ce47-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,193 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,196 DEBUG [hconnection-0x631077e1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,196 DEBUG [hconnection-0x4001c522-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,196 DEBUG [hconnection-0x41e60d61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,196 DEBUG [hconnection-0xef2d3f0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,197 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,197 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,197 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49550, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,198 DEBUG [hconnection-0x803e15d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,198 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,198 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,200 DEBUG [hconnection-0x5b54ecf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,201 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,201 DEBUG [hconnection-0x5e0147f0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,203 DEBUG [hconnection-0x35b21613-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:04,204 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49598, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,206 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:04,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:04,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713964218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713964219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713964219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713964219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,220 DEBUG [hconnection-0x586a6932-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:04,221 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:04,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713964222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a46fe1a0c3f44ead810f873da06cdf7d_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713904203/Put/seqid=0 2024-11-27T13:25:04,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742196_1372 (size=12154) 2024-11-27T13:25:04,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:04,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713964320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713964320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713964320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713964320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713964323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,343 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:04,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:04,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:04,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:04,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:04,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713964526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713964526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713964527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713964527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713964527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:04,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:04,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,670 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:04,674 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a46fe1a0c3f44ead810f873da06cdf7d_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a46fe1a0c3f44ead810f873da06cdf7d_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:04,675 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/92c1efaa6d4047a7af6bec0dced43e93, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:04,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/92c1efaa6d4047a7af6bec0dced43e93 is 175, key is test_row_0/A:col10/1732713904203/Put/seqid=0 2024-11-27T13:25:04,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742197_1373 (size=30955) 2024-11-27T13:25:04,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:04,804 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:04,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:04,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713964829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713964830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713964832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713964834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:04,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713964834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:04,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:04,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:04,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:04,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:04,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:04,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,084 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/92c1efaa6d4047a7af6bec0dced43e93 2024-11-27T13:25:05,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:05,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5871b905b034438d92815de796e00350 is 50, key is test_row_0/B:col10/1732713904203/Put/seqid=0 2024-11-27T13:25:05,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:05,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:05,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
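The RegionTooBusyException entries above ("Over memstore limit=512.0 K") and the repeated pid=113 flush attempts are two sides of the same condition: the region's memstore has grown past its blocking limit, so updates are rejected until the in-progress flush completes. As a minimal sketch, assuming the test lowers the per-region flush size (512 K is consistent with a 128 KB flush size times the default multiplier of 4), the limit comes from two standard HBase configuration keys:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch, not this test's actual setup: the write-blocking threshold behind
// "Over memstore limit" is roughly hbase.hregion.memstore.flush.size multiplied by
// hbase.hregion.memstore.block.multiplier. A 128 KB flush size with the default
// multiplier of 4 would give the 512 KB limit reported above.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush trigger (assumed test-sized value)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block updates at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above ~" + blockingLimit + " bytes per region");
  }
}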
2024-11-27T13:25:05,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742198_1374 (size=12001) 2024-11-27T13:25:05,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:05,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:05,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:05,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:05,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:05,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713965335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:05,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:05,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713965336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:05,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:05,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713965338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:05,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:05,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713965339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:05,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713965341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:05,416 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:05,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:05,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:05,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5871b905b034438d92815de796e00350 2024-11-27T13:25:05,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3bfab9f7477c4cd58d862fb043e8f762 is 50, key is test_row_0/C:col10/1732713904203/Put/seqid=0 2024-11-27T13:25:05,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742199_1375 (size=12001) 2024-11-27T13:25:05,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3bfab9f7477c4cd58d862fb043e8f762 2024-11-27T13:25:05,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/92c1efaa6d4047a7af6bec0dced43e93 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93 2024-11-27T13:25:05,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:05,569 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:05,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:05,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
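Each of these pid=113 failures follows the same cycle: the master dispatches FlushRegionCallable, the region server answers "NOT flushing ... as already flushing", reports the IOException back, and the master re-dispatches a moment later until the running flush finishes. A hedged sketch of the kind of client call such a flush procedure is typically started from (the actual trigger is not shown in this excerpt):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch only; the actual trigger for pid=112/113 is not visible in this excerpt.
// Admin.flush() requests a flush of every region of the table; in recent HBase versions
// this can run as a master-side procedure that dispatches FlushRegionCallable to the
// region servers and retries while the region reports "already flushing".
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}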
2024-11-27T13:25:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
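The Mutate RPCs being rejected throughout this stretch carry single-row puts against families A, B and C with qualifiers such as col10, as the HFile key dumps ("key is test_row_0/B:col10/...") indicate. A hedged illustration of that shape of write, not the test's actual writer code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustration of the shape of the writes, not the test's actual writer code: rows
// named test_row_N, families A/B/C, qualifiers such as col10, as the key dumps show.
public class PutShapeSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      table.put(put); // the server may reject this with RegionTooBusyException while the memstore is over its limit
    }
  }
}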
2024-11-27T13:25:05,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93, entries=150, sequenceid=17, filesize=30.2 K 2024-11-27T13:25:05,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5871b905b034438d92815de796e00350 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350 2024-11-27T13:25:05,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350, entries=150, sequenceid=17, filesize=11.7 K 2024-11-27T13:25:05,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3bfab9f7477c4cd58d862fb043e8f762 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762 2024-11-27T13:25:05,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762, entries=150, sequenceid=17, filesize=11.7 K 2024-11-27T13:25:05,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d2d1919593dfb083385f344db3904c47 in 1378ms, sequenceid=17, compaction requested=false 2024-11-27T13:25:05,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:05,722 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:05,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
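Family A in this run is flushed through DefaultMobStoreFlusher: the values land under mobdir while only reference cells go into the ordinary store file, which is consistent with the 175-byte "biggest cell" for A versus 50 bytes for B and C, and with the 30.2 K A file versus the 11.7 K B and C files. B and C go through the plain DefaultStoreFlusher. A minimal sketch of a table layout with that shape, using a hypothetical MOB threshold since the test's real value is not visible here:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of a layout matching this run: family A MOB-enabled (values written
// to the MOB area, reference cells kept in the store file), B and C ordinary families.
// The MOB threshold below is hypothetical; the test's real value is not shown here.
public class MobLayoutSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100L) // bytes; cells larger than this are written to the MOB area
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build();
    System.out.println(desc);
  }
}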
2024-11-27T13:25:05,723 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:05,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:05,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112725e289bb3e984ad182a2f67c33b81657_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713904217/Put/seqid=0 2024-11-27T13:25:05,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742200_1376 (size=12154) 2024-11-27T13:25:05,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:05,741 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112725e289bb3e984ad182a2f67c33b81657_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112725e289bb3e984ad182a2f67c33b81657_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:05,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/7ae59c62a50546bb9d5a9da48589bc30, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:05,743 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/7ae59c62a50546bb9d5a9da48589bc30 is 175, key is test_row_0/A:col10/1732713904217/Put/seqid=0 2024-11-27T13:25:05,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742201_1377 (size=30955) 2024-11-27T13:25:05,909 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:25:06,150 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/7ae59c62a50546bb9d5a9da48589bc30 2024-11-27T13:25:06,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bc2294033fb24aaaa66ca076fa7b4803 is 50, key is test_row_0/B:col10/1732713904217/Put/seqid=0 2024-11-27T13:25:06,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742202_1378 (size=12001) 2024-11-27T13:25:06,164 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bc2294033fb24aaaa66ca076fa7b4803 2024-11-27T13:25:06,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab36e8035e27447e81225973a3d8ffb9 is 50, key is test_row_0/C:col10/1732713904217/Put/seqid=0 2024-11-27T13:25:06,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742203_1379 (size=12001) 2024-11-27T13:25:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:06,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:06,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:06,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713966353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713966354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713966354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713966356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713966356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713966458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713966459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713966459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713966459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713966461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,605 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab36e8035e27447e81225973a3d8ffb9 2024-11-27T13:25:06,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/7ae59c62a50546bb9d5a9da48589bc30 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30 2024-11-27T13:25:06,613 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30, entries=150, sequenceid=41, filesize=30.2 K 2024-11-27T13:25:06,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bc2294033fb24aaaa66ca076fa7b4803 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803 2024-11-27T13:25:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,619 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T13:25:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab36e8035e27447e81225973a3d8ffb9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9 2024-11-27T13:25:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T13:25:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-27T13:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,626 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T13:25:06,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,627 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d2d1919593dfb083385f344db3904c47 in 904ms, sequenceid=41, compaction requested=false 2024-11-27T13:25:06,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:06,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:06,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-27T13:25:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-27T13:25:06,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4370 sec 2024-11-27T13:25:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,631 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.4410 sec 2024-11-27T13:25:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:06,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:25:06,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-27T13:25:06,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:06,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c5effe95334642c5ac4347559810bb96_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:06,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742204_1380 (size=14594) 2024-11-27T13:25:06,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:06,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713966721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713966722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713966727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713966728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713966728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713966829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713966829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713966832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713966836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:06,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:06,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713966837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713967035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713967035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713967038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713967041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713967041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,101 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:07,104 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c5effe95334642c5ac4347559810bb96_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c5effe95334642c5ac4347559810bb96_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:07,105 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/f92b5c210ad74832b562b9e5cabcf7e1, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:07,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/f92b5c210ad74832b562b9e5cabcf7e1 is 175, key is test_row_0/A:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:07,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742205_1381 (size=39549) 2024-11-27T13:25:07,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713967339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713967341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713967344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713967348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713967349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,512 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/f92b5c210ad74832b562b9e5cabcf7e1 2024-11-27T13:25:07,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/088c76ff683b480da9f2a73b767cf0e4 is 50, key is test_row_0/B:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742206_1382 (size=12001) 2024-11-27T13:25:07,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713967843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713967849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713967852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713967855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:07,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713967860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:07,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/088c76ff683b480da9f2a73b767cf0e4 2024-11-27T13:25:07,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/544361ffc48547c3b5a91353a9225cb7 is 50, key is test_row_0/C:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:07,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742207_1383 (size=12001) 2024-11-27T13:25:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-27T13:25:08,295 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-27T13:25:08,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-27T13:25:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 
2024-11-27T13:25:08,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:08,299 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:08,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:08,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/544361ffc48547c3b5a91353a9225cb7 2024-11-27T13:25:08,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/f92b5c210ad74832b562b9e5cabcf7e1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1 2024-11-27T13:25:08,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1, entries=200, sequenceid=55, filesize=38.6 K 2024-11-27T13:25:08,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/088c76ff683b480da9f2a73b767cf0e4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4 2024-11-27T13:25:08,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:25:08,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/544361ffc48547c3b5a91353a9225cb7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7 2024-11-27T13:25:08,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7, entries=150, sequenceid=55, filesize=11.7 K 2024-11-27T13:25:08,358 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d2d1919593dfb083385f344db3904c47 in 1673ms, sequenceid=55, compaction requested=true 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:08,359 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:08,359 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:08,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:08,360 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:08,360 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:08,360 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:08,360 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor compaction (all files) 2024-11-27T13:25:08,360 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:08,360 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:08,360 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=35.2 K 2024-11-27T13:25:08,360 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=99.1 K 2024-11-27T13:25:08,360 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:08,360 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1] 2024-11-27T13:25:08,361 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5871b905b034438d92815de796e00350, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713904199 2024-11-27T13:25:08,361 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92c1efaa6d4047a7af6bec0dced43e93, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713904199 2024-11-27T13:25:08,361 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bc2294033fb24aaaa66ca076fa7b4803, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713904217 2024-11-27T13:25:08,361 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ae59c62a50546bb9d5a9da48589bc30, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713904217 2024-11-27T13:25:08,361 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 088c76ff683b480da9f2a73b767cf0e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:08,362 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f92b5c210ad74832b562b9e5cabcf7e1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:08,370 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:08,370 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/d5b5dd96e0a3440f921768e3f661426e is 50, key is test_row_0/B:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:08,376 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:08,389 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127ea4714f6ed9d41e2b96404e5de396a03_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:08,391 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127ea4714f6ed9d41e2b96404e5de396a03_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:08,391 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ea4714f6ed9d41e2b96404e5de396a03_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T13:25:08,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742208_1384 (size=12104) 2024-11-27T13:25:08,418 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/d5b5dd96e0a3440f921768e3f661426e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/d5b5dd96e0a3440f921768e3f661426e 2024-11-27T13:25:08,427 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into d5b5dd96e0a3440f921768e3f661426e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:08,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:08,427 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=13, startTime=1732713908359; duration=0sec 2024-11-27T13:25:08,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:08,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:08,427 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:08,429 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:08,429 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/C is initiating minor compaction (all files) 2024-11-27T13:25:08,429 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/C in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:08,430 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=35.2 K 2024-11-27T13:25:08,430 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bfab9f7477c4cd58d862fb043e8f762, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732713904199 2024-11-27T13:25:08,430 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ab36e8035e27447e81225973a3d8ffb9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713904217 2024-11-27T13:25:08,430 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 544361ffc48547c3b5a91353a9225cb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:08,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is 
added to blk_1073742209_1385 (size=4469) 2024-11-27T13:25:08,442 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#C#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:08,443 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/75ded68dce084ff19cfc729d2e60182c is 50, key is test_row_0/C:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:08,450 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:08,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-27T13:25:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:08,452 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:25:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:08,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:08,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:08,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:08,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742210_1386 (size=12104) 2024-11-27T13:25:08,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fba9f23380174288bd34cffaf94c0728_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713906717/Put/seqid=0 2024-11-27T13:25:08,491 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742211_1387 (size=12154) 2024-11-27T13:25:08,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T13:25:08,839 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#329 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:08,840 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/815935d1ed1e413da7c07d93f60a6014 is 175, key is test_row_0/A:col10/1732713906355/Put/seqid=0 2024-11-27T13:25:08,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742212_1388 (size=31058) 2024-11-27T13:25:08,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:08,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:08,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713968869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713968870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713968873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,880 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/75ded68dce084ff19cfc729d2e60182c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/75ded68dce084ff19cfc729d2e60182c 2024-11-27T13:25:08,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713968878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,885 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/C of d2d1919593dfb083385f344db3904c47 into 75ded68dce084ff19cfc729d2e60182c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:08,885 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:08,885 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/C, priority=13, startTime=1732713908359; duration=0sec 2024-11-27T13:25:08,885 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:08,885 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:08,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713968879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:08,895 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127fba9f23380174288bd34cffaf94c0728_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fba9f23380174288bd34cffaf94c0728_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:08,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/9d498f428bf24e459864ca19a2f6b651, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:08,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/9d498f428bf24e459864ca19a2f6b651 is 175, key is test_row_0/A:col10/1732713906717/Put/seqid=0 2024-11-27T13:25:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T13:25:08,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742213_1389 (size=30955) 2024-11-27T13:25:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713968980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713968980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713968984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:08,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713968988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713969183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713969183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713969188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713969192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,251 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/815935d1ed1e413da7c07d93f60a6014 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014 2024-11-27T13:25:09,256 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 815935d1ed1e413da7c07d93f60a6014(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:09,256 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:09,256 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=13, startTime=1732713908359; duration=0sec 2024-11-27T13:25:09,256 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:09,256 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:09,301 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/9d498f428bf24e459864ca19a2f6b651 2024-11-27T13:25:09,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5a1743451dda4ce29a9010b36d3db648 is 50, key is test_row_0/B:col10/1732713906717/Put/seqid=0 2024-11-27T13:25:09,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742214_1390 (size=12001) 2024-11-27T13:25:09,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T13:25:09,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713969487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713969487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713969491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713969497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,713 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5a1743451dda4ce29a9010b36d3db648 2024-11-27T13:25:09,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/f356c918867c4d7abb654b021e15e1f2 is 50, key is test_row_0/C:col10/1732713906717/Put/seqid=0 2024-11-27T13:25:09,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742215_1391 (size=12001) 2024-11-27T13:25:09,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713969993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713969994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:09,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713969998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:10,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:10,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713970002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:10,126 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/f356c918867c4d7abb654b021e15e1f2 2024-11-27T13:25:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/9d498f428bf24e459864ca19a2f6b651 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651 2024-11-27T13:25:10,134 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651, entries=150, sequenceid=78, filesize=30.2 K 2024-11-27T13:25:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/5a1743451dda4ce29a9010b36d3db648 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648 2024-11-27T13:25:10,139 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648, entries=150, sequenceid=78, filesize=11.7 K 2024-11-27T13:25:10,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/f356c918867c4d7abb654b021e15e1f2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2 2024-11-27T13:25:10,143 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2, entries=150, sequenceid=78, filesize=11.7 K 2024-11-27T13:25:10,144 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d2d1919593dfb083385f344db3904c47 in 1692ms, sequenceid=78, compaction requested=false 2024-11-27T13:25:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-27T13:25:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-27T13:25:10,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-27T13:25:10,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8460 sec 2024-11-27T13:25:10,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.8490 sec 2024-11-27T13:25:10,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-27T13:25:10,402 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-27T13:25:10,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-27T13:25:10,405 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-27T13:25:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:10,406 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:10,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:10,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:10,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:10,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-27T13:25:10,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:10,559 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:10,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c9d5be268bb3431990afade12ed9adac_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713908876/Put/seqid=0 2024-11-27T13:25:10,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45727 is added to blk_1073742216_1392 (size=12154) 2024-11-27T13:25:10,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:10,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:10,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:10,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713970973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:10,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:10,981 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c9d5be268bb3431990afade12ed9adac_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c9d5be268bb3431990afade12ed9adac_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:10,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d52ab788c3fe431290fcbc5d15326c22, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:10,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d52ab788c3fe431290fcbc5d15326c22 is 175, key is test_row_0/A:col10/1732713908876/Put/seqid=0 2024-11-27T13:25:10,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742217_1393 (size=30955) 2024-11-27T13:25:11,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713970998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:11,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713971004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713971004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713971009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713971074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713971280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,387 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d52ab788c3fe431290fcbc5d15326c22 2024-11-27T13:25:11,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/e5e23358592c471aaeb64d2f8d2a0ec9 is 50, key is test_row_0/B:col10/1732713908876/Put/seqid=0 2024-11-27T13:25:11,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742218_1394 (size=12001) 2024-11-27T13:25:11,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:11,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713971588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:11,799 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/e5e23358592c471aaeb64d2f8d2a0ec9 2024-11-27T13:25:11,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3454d920acd14b8aab290d5834560fa0 is 50, key is test_row_0/C:col10/1732713908876/Put/seqid=0 2024-11-27T13:25:11,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742219_1395 (size=12001) 2024-11-27T13:25:12,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:12,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713972097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:12,209 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3454d920acd14b8aab290d5834560fa0 2024-11-27T13:25:12,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d52ab788c3fe431290fcbc5d15326c22 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22 2024-11-27T13:25:12,217 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22, entries=150, sequenceid=94, filesize=30.2 K 2024-11-27T13:25:12,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/e5e23358592c471aaeb64d2f8d2a0ec9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9 2024-11-27T13:25:12,222 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9, entries=150, sequenceid=94, filesize=11.7 K 2024-11-27T13:25:12,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/3454d920acd14b8aab290d5834560fa0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0 2024-11-27T13:25:12,227 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0, entries=150, sequenceid=94, filesize=11.7 K 2024-11-27T13:25:12,228 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d2d1919593dfb083385f344db3904c47 in 1670ms, sequenceid=94, compaction requested=true 2024-11-27T13:25:12,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:12,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:12,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-27T13:25:12,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-27T13:25:12,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-27T13:25:12,230 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8230 sec 2024-11-27T13:25:12,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8270 sec 2024-11-27T13:25:12,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-27T13:25:12,510 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-27T13:25:12,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:12,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-27T13:25:12,513 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-27T13:25:12,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-27T13:25:12,513 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:12,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-27T13:25:12,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:12,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-27T13:25:12,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:12,665 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:12,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:12,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112742b397f62a3d4657a117a91f55bab5af_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713910972/Put/seqid=0 2024-11-27T13:25:12,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45727 is added to blk_1073742220_1396 (size=12154) 2024-11-27T13:25:12,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:12,680 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112742b397f62a3d4657a117a91f55bab5af_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742b397f62a3d4657a117a91f55bab5af_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:12,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4ac8eb5c568b430e8a5083f11005f5ee, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:12,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4ac8eb5c568b430e8a5083f11005f5ee is 175, key is test_row_0/A:col10/1732713910972/Put/seqid=0 2024-11-27T13:25:12,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742221_1397 (size=30955) 2024-11-27T13:25:12,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-27T13:25:13,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:13,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:13,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713973046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713973047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713973047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713973047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,086 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4ac8eb5c568b430e8a5083f11005f5ee 2024-11-27T13:25:13,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/cde24755cc2345dbb626fe177dc26404 is 50, key is test_row_0/B:col10/1732713910972/Put/seqid=0 2024-11-27T13:25:13,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742222_1398 (size=12001) 2024-11-27T13:25:13,101 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/cde24755cc2345dbb626fe177dc26404 2024-11-27T13:25:13,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713973103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/90cde851e5674f9e9eee3a479b4af5e1 is 50, key is test_row_0/C:col10/1732713910972/Put/seqid=0 2024-11-27T13:25:13,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-27T13:25:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742223_1399 (size=12001) 2024-11-27T13:25:13,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713973153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713973157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713973157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713973157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713973360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713973361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713973363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713973363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,526 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/90cde851e5674f9e9eee3a479b4af5e1 2024-11-27T13:25:13,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4ac8eb5c568b430e8a5083f11005f5ee as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee 2024-11-27T13:25:13,534 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee, entries=150, sequenceid=116, filesize=30.2 K 2024-11-27T13:25:13,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/cde24755cc2345dbb626fe177dc26404 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404 2024-11-27T13:25:13,538 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404, entries=150, sequenceid=116, filesize=11.7 K 2024-11-27T13:25:13,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/90cde851e5674f9e9eee3a479b4af5e1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1 2024-11-27T13:25:13,542 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1, entries=150, sequenceid=116, filesize=11.7 K 2024-11-27T13:25:13,544 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d2d1919593dfb083385f344db3904c47 in 879ms, sequenceid=116, compaction requested=true 2024-11-27T13:25:13,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:13,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
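The flush that completes above (pid=119, sequenceid=116) was requested by the test client; in this build the master runs it as a FlushTableProcedure with FlushRegionProcedure subprocedures, as the surrounding entries show. For reference, a minimal client-side sketch that triggers such a flush through the public Admin API is given below. It is illustrative only: the class name FlushTableExample is hypothetical, and it assumes an hbase-site.xml for the target cluster is on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Reads cluster settings (ZooKeeper quorum etc.) from hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; as the log above shows,
          // the master drives this as a flush procedure and the client call returns
          // once that procedure completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }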
2024-11-27T13:25:13,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-27T13:25:13,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-27T13:25:13,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-27T13:25:13,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0320 sec 2024-11-27T13:25:13,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.0350 sec 2024-11-27T13:25:13,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-27T13:25:13,616 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-27T13:25:13,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:13,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-27T13:25:13,619 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:13,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:13,619 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:13,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:13,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:13,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:13,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278136191c39344bb49835e29d30d3f0fb_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:13,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742224_1400 (size=17184) 2024-11-27T13:25:13,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713973699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713973702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713973706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713973707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:13,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:13,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:13,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:13,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:13,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:13,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:13,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:13,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:13,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713973808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713973813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713973813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713973814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:13,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:13,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:13,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:13,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
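The RegionTooBusyException entries that dominate this stretch of the log are back-pressure from HRegion.checkResources(): once a region's in-memory data exceeds its blocking memstore limit (reported here as 512.0 K; in general this limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), new mutations are rejected until the in-flight flush drains the memstore. A rough client-side sketch of handling this with retry and backoff follows. It is only illustrative: PutWithBackoffExample, the row key, and the value are made up, the column family A and table name come from the test, and the stock HBase client normally retries this exception internally on its own, possibly surfacing it wrapped in a retries-exhausted exception rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                        // write accepted
            } catch (RegionTooBusyException e) {
              // The region is above its blocking memstore limit; give the
              // ongoing flush time to drain it, then retry with a longer wait.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }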
2024-11-27T13:25:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:13,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713974015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713974021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713974022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713974022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,077 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,085 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:14,088 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278136191c39344bb49835e29d30d3f0fb_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278136191c39344bb49835e29d30d3f0fb_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:14,089 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/e786545f20044ec491909e28efc13277, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:14,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/e786545f20044ec491909e28efc13277 is 175, key is test_row_0/A:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:14,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742225_1401 (size=48289) 2024-11-27T13:25:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:14,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
as already flushing 2024-11-27T13:25:14,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713974320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713974326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713974328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713974328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,384 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:14,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,494 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/e786545f20044ec491909e28efc13277 2024-11-27T13:25:14,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1c6b04c4ec1d480791d3358157649c79 is 50, key is test_row_0/B:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742226_1402 (size=12151) 2024-11-27T13:25:14,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:14,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,689 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:14,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713974827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713974833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,842 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:14,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713974840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:14,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713974841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:14,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1c6b04c4ec1d480791d3358157649c79 2024-11-27T13:25:14,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/52985386b74241e987cc7033f4d5419e is 50, key is test_row_0/C:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:14,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742227_1403 (size=12151) 2024-11-27T13:25:14,995 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:14,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:14,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713975122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,126 DEBUG [Thread-1663 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:25:15,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:15,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:15,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,301 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:15,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:15,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:15,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
as already flushing 2024-11-27T13:25:15,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:15,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:15,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/52985386b74241e987cc7033f4d5419e 2024-11-27T13:25:15,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/e786545f20044ec491909e28efc13277 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277 2024-11-27T13:25:15,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277, entries=250, sequenceid=132, filesize=47.2 K 2024-11-27T13:25:15,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1c6b04c4ec1d480791d3358157649c79 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79 2024-11-27T13:25:15,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79, entries=150, sequenceid=132, filesize=11.9 K 2024-11-27T13:25:15,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/52985386b74241e987cc7033f4d5419e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e 2024-11-27T13:25:15,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e, entries=150, sequenceid=132, filesize=11.9 K 2024-11-27T13:25:15,342 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for d2d1919593dfb083385f344db3904c47 in 1671ms, sequenceid=132, compaction requested=true 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:15,343 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:15,343 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:15,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:15,345 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60258 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:25:15,345 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor 
compaction (all files) 2024-11-27T13:25:15,345 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:15,345 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/d5b5dd96e0a3440f921768e3f661426e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=58.8 K 2024-11-27T13:25:15,345 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 172212 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:25:15,345 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:15,345 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:15,345 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=168.2 K 2024-11-27T13:25:15,346 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:15,346 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277] 2024-11-27T13:25:15,346 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d5b5dd96e0a3440f921768e3f661426e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:15,346 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 815935d1ed1e413da7c07d93f60a6014, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:15,347 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a1743451dda4ce29a9010b36d3db648, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732713906717 2024-11-27T13:25:15,347 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d498f428bf24e459864ca19a2f6b651, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732713906717 2024-11-27T13:25:15,347 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e5e23358592c471aaeb64d2f8d2a0ec9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713908856 2024-11-27T13:25:15,347 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d52ab788c3fe431290fcbc5d15326c22, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713908856 2024-11-27T13:25:15,348 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ac8eb5c568b430e8a5083f11005f5ee, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732713910940 2024-11-27T13:25:15,348 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cde24755cc2345dbb626fe177dc26404, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732713910940 2024-11-27T13:25:15,348 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e786545f20044ec491909e28efc13277, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732713913045 2024-11-27T13:25:15,348 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c6b04c4ec1d480791d3358157649c79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, 
earliestPutTs=1732713913046 2024-11-27T13:25:15,361 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#343 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:15,362 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/8ef8bb6c390a46bca6f326f92c8a89f7 is 50, key is test_row_0/B:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:15,363 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:15,371 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127df0d344a8ed14c6997011e4da23c0d8a_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:15,374 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127df0d344a8ed14c6997011e4da23c0d8a_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:15,375 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127df0d344a8ed14c6997011e4da23c0d8a_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:15,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742228_1404 (size=12425) 2024-11-27T13:25:15,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742229_1405 (size=4469) 2024-11-27T13:25:15,454 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:15,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-27T13:25:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:15,455 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:15,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:15,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275d7d82041cb440e6891cb8d1a580967c_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713913706/Put/seqid=0 2024-11-27T13:25:15,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742230_1406 (size=12304) 2024-11-27T13:25:15,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:15,476 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275d7d82041cb440e6891cb8d1a580967c_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7d82041cb440e6891cb8d1a580967c_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/79b497ed746d4374819d5526dbf4221a, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:15,477 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/79b497ed746d4374819d5526dbf4221a is 175, key is test_row_0/A:col10/1732713913706/Put/seqid=0 2024-11-27T13:25:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742231_1407 (size=31105) 2024-11-27T13:25:15,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:15,783 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/8ef8bb6c390a46bca6f326f92c8a89f7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/8ef8bb6c390a46bca6f326f92c8a89f7 2024-11-27T13:25:15,783 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#344 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:15,784 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/5ce4c95c170d454c86e869225db89926 is 175, key is test_row_0/A:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:15,792 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into 8ef8bb6c390a46bca6f326f92c8a89f7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:15,792 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:15,792 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=11, startTime=1732713915343; duration=0sec 2024-11-27T13:25:15,792 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:15,792 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:15,792 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:25:15,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742232_1408 (size=31379) 2024-11-27T13:25:15,799 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60258 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:25:15,799 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/C is initiating minor compaction (all files) 2024-11-27T13:25:15,799 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/C in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:15,799 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/75ded68dce084ff19cfc729d2e60182c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=58.8 K 2024-11-27T13:25:15,800 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 75ded68dce084ff19cfc729d2e60182c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732713906355 2024-11-27T13:25:15,800 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f356c918867c4d7abb654b021e15e1f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732713906717 2024-11-27T13:25:15,800 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3454d920acd14b8aab290d5834560fa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732713908856 2024-11-27T13:25:15,801 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 90cde851e5674f9e9eee3a479b4af5e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732713910940 2024-11-27T13:25:15,801 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 52985386b74241e987cc7033f4d5419e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732713913046 2024-11-27T13:25:15,803 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/5ce4c95c170d454c86e869225db89926 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926 2024-11-27T13:25:15,808 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 5ce4c95c170d454c86e869225db89926(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
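The entries above show the region server's own minor compactions of the A, B, and C families of d2d1919593dfb083385f344db3904c47, selected by ExploringCompactionPolicy. For reference only, the same kind of compaction can also be requested explicitly through the public client API; the following is a minimal sketch assuming the standard HBase 2.x Admin interface and the table/family names that appear in this log. It is not part of the test run itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask for a major compaction of the C family of TestAcidGuarantees;
            // the request is queued on the region server, much like the
            // compaction selections logged above.
            admin.majorCompact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
        }
    }
}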
2024-11-27T13:25:15,808 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:15,808 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=11, startTime=1732713915343; duration=0sec 2024-11-27T13:25:15,808 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:15,808 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:15,813 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#C#compaction#346 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:15,814 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/130e9c5a9a1046d7a9395463bda5e7cf is 50, key is test_row_0/C:col10/1732713913670/Put/seqid=0 2024-11-27T13:25:15,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742233_1409 (size=12425) 2024-11-27T13:25:15,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:15,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713975863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713975864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713975869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713975870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,883 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=152, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/79b497ed746d4374819d5526dbf4221a 2024-11-27T13:25:15,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bd109ec9715643d6b384148999557813 is 50, key is test_row_0/B:col10/1732713913706/Put/seqid=0 2024-11-27T13:25:15,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742234_1410 (size=12151) 2024-11-27T13:25:15,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713975971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713975971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713975974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:15,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:15,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713975979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713976177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713976178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713976179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713976183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,227 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/130e9c5a9a1046d7a9395463bda5e7cf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/130e9c5a9a1046d7a9395463bda5e7cf 2024-11-27T13:25:16,231 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in d2d1919593dfb083385f344db3904c47/C of d2d1919593dfb083385f344db3904c47 into 130e9c5a9a1046d7a9395463bda5e7cf(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
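The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512 K in this test configuration). The HBase client normally retries such calls internally; the hypothetical sketch below only illustrates the shape of an explicit application-level retry with backoff, reusing the table name from the log. Helper and class names are illustrative, not from the source.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class PutWithBackoff {
    // Hypothetical helper: retry a single Put a few times if the region reports
    // that its memstore is over the blocking limit, backing off between attempts.
    static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            long sleepMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Region is still flushing/compacting; wait and try again.
                    Thread.sleep(sleepMs);
                    sleepMs *= 2;
                }
            }
            throw new java.io.IOException("put failed after repeated RegionTooBusyException");
        }
    }
}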
2024-11-27T13:25:16,232 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,232 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/C, priority=11, startTime=1732713915343; duration=0sec 2024-11-27T13:25:16,232 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:16,232 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:16,294 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bd109ec9715643d6b384148999557813 2024-11-27T13:25:16,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/709f2f5efdbd4745aaa2c56e87c3a6fd is 50, key is test_row_0/C:col10/1732713913706/Put/seqid=0 2024-11-27T13:25:16,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742235_1411 (size=12151) 2024-11-27T13:25:16,307 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/709f2f5efdbd4745aaa2c56e87c3a6fd 2024-11-27T13:25:16,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/79b497ed746d4374819d5526dbf4221a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a 2024-11-27T13:25:16,322 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a, entries=150, sequenceid=152, filesize=30.4 K 2024-11-27T13:25:16,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/bd109ec9715643d6b384148999557813 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813 2024-11-27T13:25:16,326 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813, entries=150, sequenceid=152, filesize=11.9 K 2024-11-27T13:25:16,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/709f2f5efdbd4745aaa2c56e87c3a6fd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd 2024-11-27T13:25:16,330 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd, entries=150, sequenceid=152, filesize=11.9 K 2024-11-27T13:25:16,332 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for d2d1919593dfb083385f344db3904c47 in 877ms, sequenceid=152, compaction requested=false 2024-11-27T13:25:16,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
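At this point the flush requested by pid=120/121 has written the A, B, and C store files, and the FlushRegionProcedure/FlushTableProcedure report success in the entries that follow. An equivalent flush can be requested from a client; a minimal sketch, assuming the standard HBase 2.x Admin API and the table name taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request a flush of all stores of TestAcidGuarantees; the master drives it
            // through the same flush procedures seen in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}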
2024-11-27T13:25:16,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-27T13:25:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-27T13:25:16,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-27T13:25:16,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7130 sec 2024-11-27T13:25:16,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.7170 sec 2024-11-27T13:25:16,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:16,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:16,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e00d6d9e1bb240028121347d9bdc03db_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742236_1412 (size=14794) 2024-11-27T13:25:16,497 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:16,501 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e00d6d9e1bb240028121347d9bdc03db_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e00d6d9e1bb240028121347d9bdc03db_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:16,502 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8f17a15d6d14485a9f3e2792975348e6, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8f17a15d6d14485a9f3e2792975348e6 is 175, key is test_row_0/A:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742237_1413 (size=39749) 2024-11-27T13:25:16,516 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8f17a15d6d14485a9f3e2792975348e6 2024-11-27T13:25:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713976507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/fc8f56468bf54a0ab79ff258e08d613a is 50, key is test_row_0/B:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713976516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713976516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713976517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742238_1414 (size=12151) 2024-11-27T13:25:16,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/fc8f56468bf54a0ab79ff258e08d613a 2024-11-27T13:25:16,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/1d7530c74fa94342bcd8512bc5dd6498 is 50, key is test_row_0/C:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742239_1415 (size=12151) 2024-11-27T13:25:16,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/1d7530c74fa94342bcd8512bc5dd6498 2024-11-27T13:25:16,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8f17a15d6d14485a9f3e2792975348e6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6 2024-11-27T13:25:16,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6, entries=200, sequenceid=173, filesize=38.8 K 2024-11-27T13:25:16,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/fc8f56468bf54a0ab79ff258e08d613a as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a 2024-11-27T13:25:16,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a, entries=150, sequenceid=173, filesize=11.9 K 2024-11-27T13:25:16,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/1d7530c74fa94342bcd8512bc5dd6498 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498 2024-11-27T13:25:16,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498, entries=150, sequenceid=173, filesize=11.9 K 2024-11-27T13:25:16,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2d1919593dfb083385f344db3904c47 in 73ms, sequenceid=173, compaction requested=true 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:16,557 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:16,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:16,558 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:16,559 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:16,559 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:16,559 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor compaction (all files) 2024-11-27T13:25:16,559 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:16,559 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:16,559 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:16,559 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/8ef8bb6c390a46bca6f326f92c8a89f7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=35.9 K 2024-11-27T13:25:16,559 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=99.8 K 2024-11-27T13:25:16,559 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:16,559 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6] 2024-11-27T13:25:16,560 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ef8bb6c390a46bca6f326f92c8a89f7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732713913046 2024-11-27T13:25:16,560 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ce4c95c170d454c86e869225db89926, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732713913046 2024-11-27T13:25:16,560 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting bd109ec9715643d6b384148999557813, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732713913698 2024-11-27T13:25:16,560 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79b497ed746d4374819d5526dbf4221a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732713913698 2024-11-27T13:25:16,560 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fc8f56468bf54a0ab79ff258e08d613a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915868 2024-11-27T13:25:16,561 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f17a15d6d14485a9f3e2792975348e6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915859 2024-11-27T13:25:16,568 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,568 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#352 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:16,569 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/fd74b26c40fc4d5c8486c578bcf3aafb is 50, key is test_row_0/B:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,571 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127f7e65855369140339cd573f2a2b98258_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,573 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127f7e65855369140339cd573f2a2b98258_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,573 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f7e65855369140339cd573f2a2b98258_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742240_1416 (size=12527) 2024-11-27T13:25:16,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742241_1417 (size=4469) 2024-11-27T13:25:16,579 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#353 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:16,580 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/38ab670fce00444d97230efd0f0dd3cf is 175, key is test_row_0/A:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,581 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/fd74b26c40fc4d5c8486c578bcf3aafb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fd74b26c40fc4d5c8486c578bcf3aafb 2024-11-27T13:25:16,586 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into fd74b26c40fc4d5c8486c578bcf3aafb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
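The "Exploring compaction algorithm has selected 3 files of size 36727 ... with 1 in ratio" entries above come from HBase's ExploringCompactionPolicy, which slides a window over a store's files and only accepts windows whose files are all roughly comparable in size. The following is a minimal, self-contained Java sketch of that ratio test, not the actual HBase implementation; the tie-break rule and the file sizes in main() (chosen to sum to the 36727 bytes reported for store B above) are illustrative only.

    import java.util.Arrays;

    // Minimal sketch of ratio-based compaction selection, loosely modelled on HBase's
    // exploring/ratio policies; NOT the HBase implementation, just the core idea.
    public class RatioSelectionSketch {

        // A window of files is "in ratio" when no file is bigger than ratio times
        // the combined size of the other files in the window.
        static boolean inRatio(long[] window, double ratio) {
            long total = Arrays.stream(window).sum();
            for (long size : window) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        // Try every contiguous window of [minFiles, maxFiles] files and keep the best
        // qualifying one: most files first, then smallest total size (illustrative tie-break).
        static int[] select(long[] sizes, int minFiles, int maxFiles, double ratio) {
            int bestStart = -1, bestEnd = -1;
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.length; start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.length, start + maxFiles); end++) {
                    long[] window = Arrays.copyOfRange(sizes, start, end);
                    long total = Arrays.stream(window).sum();
                    boolean moreFiles = (end - start) > (bestEnd - bestStart);
                    boolean sameCountButSmaller = (end - start) == (bestEnd - bestStart) && total < bestTotal;
                    if (inRatio(window, ratio) && (moreFiles || sameCountButSmaller)) {
                        bestStart = start;
                        bestEnd = end;
                        bestTotal = total;
                    }
                }
            }
            return bestStart < 0 ? new int[0] : new int[] { bestStart, bestEnd };
        }

        public static void main(String[] args) {
            // Three flush files of similar size, like the ~12 K files compacted for store B above.
            long[] storeFiles = { 12_421, 12_153, 12_153 };
            System.out.println("Selected window [start, end): " + Arrays.toString(select(storeFiles, 3, 10, 1.2)));
        }
    }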
2024-11-27T13:25:16,586 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,586 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=13, startTime=1732713916557; duration=0sec 2024-11-27T13:25:16,586 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:16,586 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:16,586 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:16,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742242_1418 (size=31481) 2024-11-27T13:25:16,588 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36727 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:16,589 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/C is initiating minor compaction (all files) 2024-11-27T13:25:16,589 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/C in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
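The compactions logged here are system-triggered, but the same operation can be requested by hand through the client Admin API. A minimal sketch against the HBase 2.x API follows (package locations and return types can differ between releases, so treat the exact imports as an assumption); it asks for a compaction of the test table and polls until the servers report that no compaction is running.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // Ask the region servers to run a (minor) compaction on every region of the table.
                admin.compact(table);
                // Poll until the servers report that no compaction is in progress any more.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(1000);
                }
            }
        }
    }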
2024-11-27T13:25:16,589 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/130e9c5a9a1046d7a9395463bda5e7cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=35.9 K 2024-11-27T13:25:16,589 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 130e9c5a9a1046d7a9395463bda5e7cf, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732713913046 2024-11-27T13:25:16,590 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 709f2f5efdbd4745aaa2c56e87c3a6fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1732713913698 2024-11-27T13:25:16,592 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/38ab670fce00444d97230efd0f0dd3cf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf 2024-11-27T13:25:16,593 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d7530c74fa94342bcd8512bc5dd6498, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915868 2024-11-27T13:25:16,597 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 38ab670fce00444d97230efd0f0dd3cf(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
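The sizes and thresholds driving this flush/compact cycle are ordinary HBase configuration. The sketch below lists the main keys involved, with names as documented in the HBase reference guide; the values shown are only illustrative defaults, and this test clearly runs with a much smaller flush size, given the ~100 KB flushes above and the "Over memstore limit=512.0 K" rejections that follow.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Flush a region's memstores to new store files once their combined size
            // reaches this many bytes (default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

            // Writes are rejected with RegionTooBusyException once the memstore grows past
            // flush.size * block.multiplier -- the "Over memstore limit" entries in this log.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            // Minimum number of eligible store files before a minor compaction may run
            // (the selections above each work on 3 files).
            conf.setInt("hbase.hstore.compaction.min", 3);

            // Per-store file count at which further flushes block; matches the
            // "16 blocking" figure in the compaction-selection lines above.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        }
    }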
2024-11-27T13:25:16,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,597 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=13, startTime=1732713916557; duration=0sec 2024-11-27T13:25:16,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:16,597 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:16,601 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#C#compaction#354 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:16,601 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/fac9349ca4f24cd6a710b53141b25792 is 50, key is test_row_0/C:col10/1732713915868/Put/seqid=0 2024-11-27T13:25:16,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742243_1419 (size=12527) 2024-11-27T13:25:16,612 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/fac9349ca4f24cd6a710b53141b25792 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/fac9349ca4f24cd6a710b53141b25792 2024-11-27T13:25:16,619 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/C of d2d1919593dfb083385f344db3904c47 into fac9349ca4f24cd6a710b53141b25792(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
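When the memstore outruns the flusher, the region server rejects writes with RegionTooBusyException rather than queueing them, which is exactly what the entries below show. The stock client normally retries such failures internally (subject to hbase.client.retries.number and hbase.client.pause); the sketch that follows is an application-level variant with explicit backoff, written under the assumption that the exception surfaces to the caller. The table, row, family, and column names are copied from the test purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long pauseMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                      // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) throw e; // give up after a bounded number of attempts
                        Thread.sleep(pauseMs);      // back off so the flusher can drain the memstore
                        pauseMs = Math.min(pauseMs * 2, 5_000);
                    }
                }
            }
        }
    }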
2024-11-27T13:25:16,619 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:16,619 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/C, priority=13, startTime=1732713916557; duration=0sec 2024-11-27T13:25:16,619 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:16,619 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:16,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:16,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:16,635 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127acb24db64d7e46b1bcfe62c05850bb5f_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713916620/Put/seqid=0 2024-11-27T13:25:16,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713976644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713976645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713976646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713976647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742244_1420 (size=14794) 2024-11-27T13:25:16,658 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:16,662 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127acb24db64d7e46b1bcfe62c05850bb5f_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127acb24db64d7e46b1bcfe62c05850bb5f_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:16,663 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/c4b09f717253491ebed375ec0d596dc9, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:16,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/c4b09f717253491ebed375ec0d596dc9 is 175, key is test_row_0/A:col10/1732713916620/Put/seqid=0 2024-11-27T13:25:16,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742245_1421 (size=39749) 2024-11-27T13:25:16,671 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/c4b09f717253491ebed375ec0d596dc9 2024-11-27T13:25:16,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/6df6679efb16425992a26288688ec46c is 50, key is 
test_row_0/B:col10/1732713916620/Put/seqid=0 2024-11-27T13:25:16,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742246_1422 (size=12151) 2024-11-27T13:25:16,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713976748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713976750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713976752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713976753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713976953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713976956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713976957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:16,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:16,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713976958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/6df6679efb16425992a26288688ec46c 2024-11-27T13:25:17,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/2dd066040eea47f2ae5ee1e533bc4c39 is 50, key is test_row_0/C:col10/1732713916620/Put/seqid=0 2024-11-27T13:25:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742247_1423 (size=12151) 2024-11-27T13:25:17,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/2dd066040eea47f2ae5ee1e533bc4c39 2024-11-27T13:25:17,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/c4b09f717253491ebed375ec0d596dc9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9 2024-11-27T13:25:17,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9, entries=200, sequenceid=195, filesize=38.8 K 2024-11-27T13:25:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/6df6679efb16425992a26288688ec46c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c 2024-11-27T13:25:17,124 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c, entries=150, sequenceid=195, filesize=11.9 K 2024-11-27T13:25:17,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/2dd066040eea47f2ae5ee1e533bc4c39 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39 2024-11-27T13:25:17,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39, entries=150, sequenceid=195, filesize=11.9 K 2024-11-27T13:25:17,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d2d1919593dfb083385f344db3904c47 in 509ms, sequenceid=195, compaction requested=false 2024-11-27T13:25:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:17,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:17,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:17,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:17,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ff6974fac2a24c339f1f691903e24b58_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742248_1424 (size=14794) 2024-11-27T13:25:17,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713977285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713977285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713977287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713977288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713977390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713977390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713977390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713977393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713977595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713977595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713977596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713977596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,672 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:17,676 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ff6974fac2a24c339f1f691903e24b58_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ff6974fac2a24c339f1f691903e24b58_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:17,677 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/1dba2de7c5e5441b94e1dfcd65152b2b, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:17,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/1dba2de7c5e5441b94e1dfcd65152b2b is 175, key is test_row_0/A:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:17,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742249_1425 (size=39749) 2024-11-27T13:25:17,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-27T13:25:17,724 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-27T13:25:17,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:17,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-27T13:25:17,727 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:17,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:17,730 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:17,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:17,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:17,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:17,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:17,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:17,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:17,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:17,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:17,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:17,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713977900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713977900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713977901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:17,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:17,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713977901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:18,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:18,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:18,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,094 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/1dba2de7c5e5441b94e1dfcd65152b2b 2024-11-27T13:25:18,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1e92618652f34c84962d4175e6d47a12 is 50, key is test_row_0/B:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:18,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742250_1426 (size=12151) 2024-11-27T13:25:18,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1e92618652f34c84962d4175e6d47a12 2024-11-27T13:25:18,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/77edf5c773eb4ebc9376de71e475f8df is 50, key is test_row_0/C:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:18,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742251_1427 (size=12151) 2024-11-27T13:25:18,188 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:18,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
as already flushing 2024-11-27T13:25:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:18,341 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:18,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:18,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:18,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713978409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:18,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713978409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:18,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:18,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713978410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:18,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:18,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713978410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:18,494 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:18,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:18,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:18,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:18,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:18,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/77edf5c773eb4ebc9376de71e475f8df 2024-11-27T13:25:18,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/1dba2de7c5e5441b94e1dfcd65152b2b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b 2024-11-27T13:25:18,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b, entries=200, sequenceid=213, filesize=38.8 K 2024-11-27T13:25:18,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/1e92618652f34c84962d4175e6d47a12 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12 2024-11-27T13:25:18,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T13:25:18,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/77edf5c773eb4ebc9376de71e475f8df as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df 2024-11-27T13:25:18,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T13:25:18,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2d1919593dfb083385f344db3904c47 in 1276ms, sequenceid=213, compaction requested=true 2024-11-27T13:25:18,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:18,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:18,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:18,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:18,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:18,536 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:18,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:18,537 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:18,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:18,537 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110979 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:18,538 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:18,538 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,538 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=108.4 K 2024-11-27T13:25:18,538 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:18,538 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b] 2024-11-27T13:25:18,538 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:18,538 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38ab670fce00444d97230efd0f0dd3cf, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915868 2024-11-27T13:25:18,538 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor compaction (all files) 2024-11-27T13:25:18,538 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,539 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fd74b26c40fc4d5c8486c578bcf3aafb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=36.0 K 2024-11-27T13:25:18,539 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4b09f717253491ebed375ec0d596dc9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732713916497 2024-11-27T13:25:18,539 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fd74b26c40fc4d5c8486c578bcf3aafb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915868 2024-11-27T13:25:18,539 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dba2de7c5e5441b94e1dfcd65152b2b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:18,539 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6df6679efb16425992a26288688ec46c, 
keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732713916497 2024-11-27T13:25:18,540 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e92618652f34c84962d4175e6d47a12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:18,556 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:18,558 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#361 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:18,559 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/699df3c3326444d181c062d59684de89 is 50, key is test_row_0/B:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:18,572 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127066f55a0d98644b785529edab0137452_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:18,574 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127066f55a0d98644b785529edab0137452_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:18,574 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127066f55a0d98644b785529edab0137452_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:18,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742252_1428 (size=12629) 2024-11-27T13:25:18,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742253_1429 (size=4469) 2024-11-27T13:25:18,591 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#362 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:18,592 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/3dda76fe1f2647b1bc006391bb411577 is 175, key is test_row_0/A:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:18,594 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/699df3c3326444d181c062d59684de89 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/699df3c3326444d181c062d59684de89 2024-11-27T13:25:18,599 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into 699df3c3326444d181c062d59684de89(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:18,599 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:18,599 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=13, startTime=1732713918536; duration=0sec 2024-11-27T13:25:18,599 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:18,599 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:18,599 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:18,601 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:18,601 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/C is initiating minor compaction (all files) 2024-11-27T13:25:18,601 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/C in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:18,601 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/fac9349ca4f24cd6a710b53141b25792, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=36.0 K 2024-11-27T13:25:18,601 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fac9349ca4f24cd6a710b53141b25792, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732713915868 2024-11-27T13:25:18,601 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dd066040eea47f2ae5ee1e533bc4c39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732713916497 2024-11-27T13:25:18,602 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 77edf5c773eb4ebc9376de71e475f8df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:18,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742254_1430 (size=31583) 2024-11-27T13:25:18,612 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#C#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:18,613 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/b512182290b74f3c9946249b4ea141f8 is 50, key is test_row_0/C:col10/1732713917259/Put/seqid=0 2024-11-27T13:25:18,615 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/3dda76fe1f2647b1bc006391bb411577 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577 2024-11-27T13:25:18,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742255_1431 (size=12629) 2024-11-27T13:25:18,621 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 3dda76fe1f2647b1bc006391bb411577(size=30.8 K), total size for store is 30.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:18,621 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:18,621 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=13, startTime=1732713918536; duration=0sec 2024-11-27T13:25:18,622 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:18,622 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:18,647 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:18,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-27T13:25:18,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:18,648 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:18,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:18,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112793b2fae87be44233bf38e5045acb3e10_d2d1919593dfb083385f344db3904c47 is 50, key is 
test_row_0/A:col10/1732713917283/Put/seqid=0 2024-11-27T13:25:18,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742256_1432 (size=12304) 2024-11-27T13:25:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:19,024 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/b512182290b74f3c9946249b4ea141f8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/b512182290b74f3c9946249b4ea141f8 2024-11-27T13:25:19,028 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/C of d2d1919593dfb083385f344db3904c47 into b512182290b74f3c9946249b4ea141f8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:19,028 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:19,028 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/C, priority=13, startTime=1732713918536; duration=0sec 2024-11-27T13:25:19,028 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:19,028 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:19,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:19,064 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112793b2fae87be44233bf38e5045acb3e10_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112793b2fae87be44233bf38e5045acb3e10_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:19,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/0c195244f74a4e7d8f3d1f0b718a3588, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:19,065 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/0c195244f74a4e7d8f3d1f0b718a3588 is 175, key is test_row_0/A:col10/1732713917283/Put/seqid=0 2024-11-27T13:25:19,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742257_1433 (size=31105) 2024-11-27T13:25:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:19,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:19,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713979210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713979314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713979415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713979417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713979417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713979422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,470 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/0c195244f74a4e7d8f3d1f0b718a3588 2024-11-27T13:25:19,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/0ed470eab469467f9f60f15b5d6f3ed0 is 50, key is test_row_0/B:col10/1732713917283/Put/seqid=0 2024-11-27T13:25:19,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742258_1434 (size=12151) 2024-11-27T13:25:19,481 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/0ed470eab469467f9f60f15b5d6f3ed0 2024-11-27T13:25:19,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/27b60a998bbe467abfa627b08f075841 is 50, key is 
test_row_0/C:col10/1732713917283/Put/seqid=0 2024-11-27T13:25:19,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742259_1435 (size=12151) 2024-11-27T13:25:19,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713979518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:19,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713979824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:19,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:19,892 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/27b60a998bbe467abfa627b08f075841 2024-11-27T13:25:19,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/0c195244f74a4e7d8f3d1f0b718a3588 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588 2024-11-27T13:25:19,901 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588, entries=150, sequenceid=233, filesize=30.4 K 2024-11-27T13:25:19,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/0ed470eab469467f9f60f15b5d6f3ed0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0 2024-11-27T13:25:19,905 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0, entries=150, sequenceid=233, filesize=11.9 K 2024-11-27T13:25:19,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/27b60a998bbe467abfa627b08f075841 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841 2024-11-27T13:25:19,910 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841, entries=150, sequenceid=233, filesize=11.9 K 2024-11-27T13:25:19,911 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2d1919593dfb083385f344db3904c47 in 1264ms, sequenceid=233, compaction requested=false 2024-11-27T13:25:19,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:19,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:19,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-27T13:25:19,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-27T13:25:19,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-27T13:25:19,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1820 sec 2024-11-27T13:25:19,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 2.1890 sec 2024-11-27T13:25:20,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:20,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-27T13:25:20,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:20,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:20,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:20,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:20,336 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:20,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:20,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275ab0c09798b64efabab7a54ea1616706_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:20,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742260_1436 (size=14794) 2024-11-27T13:25:20,352 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:20,356 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275ab0c09798b64efabab7a54ea1616706_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275ab0c09798b64efabab7a54ea1616706_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:20,358 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/17a109612e0745f28c5da7ba8525ec47, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:20,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/17a109612e0745f28c5da7ba8525ec47 is 175, key is test_row_0/A:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:20,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742261_1437 (size=39749) 2024-11-27T13:25:20,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:20,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713980408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:20,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713980513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:20,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:20,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713980718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:20,762 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/17a109612e0745f28c5da7ba8525ec47 2024-11-27T13:25:20,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/2a20f2457d784c05ac3e06dfbef31870 is 50, key is test_row_0/B:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:20,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742262_1438 (size=12151) 2024-11-27T13:25:20,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/2a20f2457d784c05ac3e06dfbef31870 2024-11-27T13:25:20,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8f09c605e63d43daa5651c9fc82400ca is 50, key is test_row_0/C:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:20,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742263_1439 (size=12151) 2024-11-27T13:25:21,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713981026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8f09c605e63d43daa5651c9fc82400ca 2024-11-27T13:25:21,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/17a109612e0745f28c5da7ba8525ec47 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47 2024-11-27T13:25:21,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47, entries=200, sequenceid=253, filesize=38.8 K 2024-11-27T13:25:21,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/2a20f2457d784c05ac3e06dfbef31870 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870 2024-11-27T13:25:21,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T13:25:21,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8f09c605e63d43daa5651c9fc82400ca as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca 2024-11-27T13:25:21,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T13:25:21,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d2d1919593dfb083385f344db3904c47 in 890ms, sequenceid=253, compaction requested=true 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:21,225 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:21,225 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:21,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:21,226 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:21,227 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor 
compaction (all files) 2024-11-27T13:25:21,227 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,227 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/699df3c3326444d181c062d59684de89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=36.1 K 2024-11-27T13:25:21,227 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:21,227 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:21,227 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,227 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=100.0 K 2024-11-27T13:25:21,227 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,227 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47] 2024-11-27T13:25:21,228 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 699df3c3326444d181c062d59684de89, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:21,228 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dda76fe1f2647b1bc006391bb411577, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:21,228 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c195244f74a4e7d8f3d1f0b718a3588, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732713917283 2024-11-27T13:25:21,228 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ed470eab469467f9f60f15b5d6f3ed0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732713917283 2024-11-27T13:25:21,229 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a20f2457d784c05ac3e06dfbef31870, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713919202 2024-11-27T13:25:21,229 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17a109612e0745f28c5da7ba8525ec47, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713919161 2024-11-27T13:25:21,236 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:21,236 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#370 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:21,236 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/180dc9f540404e8ab039e5cf4451ccdc is 50, key is test_row_0/B:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:21,244 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112795ba0beab0df43c19a7236e0fe15a919_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:21,246 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112795ba0beab0df43c19a7236e0fe15a919_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:21,246 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112795ba0beab0df43c19a7236e0fe15a919_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:21,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742264_1440 (size=12731) 2024-11-27T13:25:21,253 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/180dc9f540404e8ab039e5cf4451ccdc as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/180dc9f540404e8ab039e5cf4451ccdc 2024-11-27T13:25:21,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742265_1441 (size=4469) 2024-11-27T13:25:21,258 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#371 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:21,258 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8ba3ffa2844f41999f95f5a83e3f9962 is 175, key is test_row_0/A:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:21,260 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into 180dc9f540404e8ab039e5cf4451ccdc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:21,260 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:21,260 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=13, startTime=1732713921225; duration=0sec 2024-11-27T13:25:21,260 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:21,260 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:21,260 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:21,261 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:21,261 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/C is initiating minor compaction (all files) 2024-11-27T13:25:21,262 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/C in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,262 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/b512182290b74f3c9946249b4ea141f8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=36.1 K 2024-11-27T13:25:21,263 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b512182290b74f3c9946249b4ea141f8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713916639 2024-11-27T13:25:21,263 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 27b60a998bbe467abfa627b08f075841, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732713917283 2024-11-27T13:25:21,264 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f09c605e63d43daa5651c9fc82400ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713919202 2024-11-27T13:25:21,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742266_1442 (size=31685) 2024-11-27T13:25:21,273 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/8ba3ffa2844f41999f95f5a83e3f9962 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962 2024-11-27T13:25:21,274 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#C#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:21,274 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/48744211a5be49f5924f353c303ee657 is 50, key is test_row_0/C:col10/1732713919208/Put/seqid=0 2024-11-27T13:25:21,280 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 8ba3ffa2844f41999f95f5a83e3f9962(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:21,280 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:21,280 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=13, startTime=1732713921225; duration=0sec 2024-11-27T13:25:21,280 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:21,280 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:21,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742267_1443 (size=12731) 2024-11-27T13:25:21,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/48744211a5be49f5924f353c303ee657 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/48744211a5be49f5924f353c303ee657 2024-11-27T13:25:21,295 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2d1919593dfb083385f344db3904c47/C of d2d1919593dfb083385f344db3904c47 into 48744211a5be49f5924f353c303ee657(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:21,295 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:21,295 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/C, priority=13, startTime=1732713921225; duration=0sec 2024-11-27T13:25:21,295 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:21,295 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:21,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:21,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:21,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270746754ccaa444ea94a0d50821e29fba_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713921422/Put/seqid=0 2024-11-27T13:25:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742268_1444 (size=12454) 2024-11-27T13:25:21,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713981446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713981447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713981448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713981449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713981531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713981552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713981552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713981553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713981553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713981757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713981757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713981758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713981758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:21,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-27T13:25:21,834 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-27T13:25:21,835 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:21,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-27T13:25:21,838 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:21,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:21,838 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:21,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:21,840 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270746754ccaa444ea94a0d50821e29fba_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270746754ccaa444ea94a0d50821e29fba_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:21,841 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d103b23f86ad40818a0eae8911743a81, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:21,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d103b23f86ad40818a0eae8911743a81 is 175, key is test_row_0/A:col10/1732713921422/Put/seqid=0 2024-11-27T13:25:21,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742269_1445 (size=31255) 2024-11-27T13:25:21,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:21,991 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:21,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:21,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:21,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:21,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:21,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:21,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713982064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713982064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713982064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713982064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:22,143 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:22,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,246 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d103b23f86ad40818a0eae8911743a81 2024-11-27T13:25:22,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/c2e74cb034bf4b78bddece45cdbfcb2b is 50, key is test_row_0/B:col10/1732713921422/Put/seqid=0 2024-11-27T13:25:22,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742270_1446 (size=12301) 2024-11-27T13:25:22,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:22,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:22,449 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:22,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:22,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:22,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49542 deadline: 1732713982545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713982569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713982570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713982571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713982572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:22,602 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:22,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:22,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:22,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/c2e74cb034bf4b78bddece45cdbfcb2b 2024-11-27T13:25:22,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/0682a4a21b984cb69b1b46e679cb8edf is 50, key is test_row_0/C:col10/1732713921422/Put/seqid=0 2024-11-27T13:25:22,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742271_1447 (size=12301) 2024-11-27T13:25:22,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/0682a4a21b984cb69b1b46e679cb8edf 2024-11-27T13:25:22,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/d103b23f86ad40818a0eae8911743a81 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81 2024-11-27T13:25:22,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81, entries=150, sequenceid=274, filesize=30.5 K 2024-11-27T13:25:22,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/c2e74cb034bf4b78bddece45cdbfcb2b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b 2024-11-27T13:25:22,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b, entries=150, sequenceid=274, filesize=12.0 K 2024-11-27T13:25:22,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/0682a4a21b984cb69b1b46e679cb8edf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/0682a4a21b984cb69b1b46e679cb8edf 2024-11-27T13:25:22,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/0682a4a21b984cb69b1b46e679cb8edf, entries=150, sequenceid=274, filesize=12.0 K 2024-11-27T13:25:22,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2d1919593dfb083385f344db3904c47 in 1263ms, sequenceid=274, compaction requested=false 2024-11-27T13:25:22,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:22,755 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:22,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-27T13:25:22,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:22,756 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:22,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:22,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270aac7ce92d634bb0b82d7a82dcbc64ea_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713921448/Put/seqid=0 2024-11-27T13:25:22,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742272_1448 (size=12454) 2024-11-27T13:25:22,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:23,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:23,174 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270aac7ce92d634bb0b82d7a82dcbc64ea_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270aac7ce92d634bb0b82d7a82dcbc64ea_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4874f316bf0a4e1eadbea58a77816416, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:23,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4874f316bf0a4e1eadbea58a77816416 is 175, key is test_row_0/A:col10/1732713921448/Put/seqid=0 2024-11-27T13:25:23,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742273_1449 (size=31255) 2024-11-27T13:25:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:23,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. as already flushing 2024-11-27T13:25:23,581 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4874f316bf0a4e1eadbea58a77816416 2024-11-27T13:25:23,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f895c6a5f5340a9960702ac7faaa0fb is 50, key is test_row_0/B:col10/1732713921448/Put/seqid=0 2024-11-27T13:25:23,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742274_1450 (size=12301) 2024-11-27T13:25:23,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713983598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713983599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713983599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713983601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713983704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713983704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713983704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713983709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49538 deadline: 1732713983909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49612 deadline: 1732713983909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49584 deadline: 1732713983909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:23,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49548 deadline: 1732713983914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:23,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:23,997 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f895c6a5f5340a9960702ac7faaa0fb 2024-11-27T13:25:24,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8833b156d5d54f59ba5ea1ef316e9fb8 is 50, key is test_row_0/C:col10/1732713921448/Put/seqid=0 2024-11-27T13:25:24,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742275_1451 (size=12301) 2024-11-27T13:25:24,023 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8833b156d5d54f59ba5ea1ef316e9fb8 2024-11-27T13:25:24,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/4874f316bf0a4e1eadbea58a77816416 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416 2024-11-27T13:25:24,033 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416, entries=150, sequenceid=292, filesize=30.5 K 2024-11-27T13:25:24,034 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f895c6a5f5340a9960702ac7faaa0fb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb 2024-11-27T13:25:24,038 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb, entries=150, sequenceid=292, filesize=12.0 K 2024-11-27T13:25:24,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/8833b156d5d54f59ba5ea1ef316e9fb8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8833b156d5d54f59ba5ea1ef316e9fb8 2024-11-27T13:25:24,043 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8833b156d5d54f59ba5ea1ef316e9fb8, entries=150, sequenceid=292, filesize=12.0 K 2024-11-27T13:25:24,044 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for d2d1919593dfb083385f344db3904c47 in 1288ms, sequenceid=292, compaction requested=true 2024-11-27T13:25:24,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:24,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
2024-11-27T13:25:24,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-27T13:25:24,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-27T13:25:24,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-27T13:25:24,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2080 sec 2024-11-27T13:25:24,049 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.2120 sec 2024-11-27T13:25:24,203 DEBUG [Thread-1680 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:59011 2024-11-27T13:25:24,203 DEBUG [Thread-1676 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:59011 2024-11-27T13:25:24,203 DEBUG [Thread-1676 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,203 DEBUG [Thread-1680 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,204 DEBUG [Thread-1678 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:59011 2024-11-27T13:25:24,204 DEBUG [Thread-1678 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,206 DEBUG [Thread-1674 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:59011 2024-11-27T13:25:24,206 DEBUG [Thread-1674 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,207 DEBUG [Thread-1682 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:59011 2024-11-27T13:25:24,207 DEBUG [Thread-1682 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:24,215 DEBUG [Thread-1665 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:59011 2024-11-27T13:25:24,215 DEBUG [Thread-1667 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:59011 2024-11-27T13:25:24,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-27T13:25:24,215 DEBUG [Thread-1667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,215 DEBUG [Thread-1665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:24,216 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:24,217 DEBUG [Thread-1669 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:59011 2024-11-27T13:25:24,217 DEBUG [Thread-1669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,219 DEBUG [Thread-1671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:59011 2024-11-27T13:25:24,219 DEBUG [Thread-1671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112786d4545c657d43f6940bf5bb18bc7803_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_0/A:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:24,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742276_1452 (size=12454) 2024-11-27T13:25:24,562 DEBUG [Thread-1663 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:59011 2024-11-27T13:25:24,562 DEBUG [Thread-1663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:24,626 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:24,629 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112786d4545c657d43f6940bf5bb18bc7803_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112786d4545c657d43f6940bf5bb18bc7803_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:24,630 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/83335062426b43cca68d9353de1e59e6, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:24,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/83335062426b43cca68d9353de1e59e6 is 175, key is test_row_0/A:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:24,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742277_1453 (size=31255) 2024-11-27T13:25:25,034 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=312, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/83335062426b43cca68d9353de1e59e6 
2024-11-27T13:25:25,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/94fa04f662e54e4c92e4cce52f0ccdf0 is 50, key is test_row_0/B:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742278_1454 (size=12301) 2024-11-27T13:25:25,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/94fa04f662e54e4c92e4cce52f0ccdf0 2024-11-27T13:25:25,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab5de967578241fa9a01e654a60b3946 is 50, key is test_row_0/C:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:25,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742279_1455 (size=12301) 2024-11-27T13:25:25,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab5de967578241fa9a01e654a60b3946 2024-11-27T13:25:25,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/83335062426b43cca68d9353de1e59e6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6 2024-11-27T13:25:25,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6, entries=150, sequenceid=312, filesize=30.5 K 2024-11-27T13:25:25,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/94fa04f662e54e4c92e4cce52f0ccdf0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0 2024-11-27T13:25:25,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0, entries=150, sequenceid=312, filesize=12.0 K 2024-11-27T13:25:25,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ab5de967578241fa9a01e654a60b3946 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab5de967578241fa9a01e654a60b3946 2024-11-27T13:25:25,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab5de967578241fa9a01e654a60b3946, entries=150, sequenceid=312, filesize=12.0 K 2024-11-27T13:25:25,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=20.13 KB/20610 for d2d1919593dfb083385f344db3904c47 in 1652ms, sequenceid=312, compaction requested=true 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:25,867 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2d1919593dfb083385f344db3904c47:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:25,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:25,867 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:25,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:25,868 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:25,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/A is initiating minor compaction (all files) 2024-11-27T13:25:25,868 DEBUG 
[RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): d2d1919593dfb083385f344db3904c47/B is initiating minor compaction (all files) 2024-11-27T13:25:25,868 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/A in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:25,868 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2d1919593dfb083385f344db3904c47/B in TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:25,868 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=122.5 K 2024-11-27T13:25:25,868 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/180dc9f540404e8ab039e5cf4451ccdc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp, totalSize=48.5 K 2024-11-27T13:25:25,868 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:25,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6] 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 180dc9f540404e8ab039e5cf4451ccdc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713919202 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ba3ffa2844f41999f95f5a83e3f9962, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713919202 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c2e74cb034bf4b78bddece45cdbfcb2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732713920400 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d103b23f86ad40818a0eae8911743a81, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732713920400 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f895c6a5f5340a9960702ac7faaa0fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713921446 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4874f316bf0a4e1eadbea58a77816416, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732713921446 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 94fa04f662e54e4c92e4cce52f0ccdf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732713923591 2024-11-27T13:25:25,869 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83335062426b43cca68d9353de1e59e6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732713923591 2024-11-27T13:25:25,877 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#B#compaction#382 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:25,878 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/250595460eb84eb6bfc0428d29e97b61 is 50, key is test_row_0/B:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:25,880 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:25,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742280_1456 (size=13017) 2024-11-27T13:25:25,882 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112749f618a73001483fa847b4009af5b631_d2d1919593dfb083385f344db3904c47 store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:25,903 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112749f618a73001483fa847b4009af5b631_d2d1919593dfb083385f344db3904c47, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:25,903 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112749f618a73001483fa847b4009af5b631_d2d1919593dfb083385f344db3904c47 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:25,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742281_1457 (size=4469) 2024-11-27T13:25:25,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-27T13:25:25,943 INFO [Thread-1673 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 42 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2565 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7694 rows 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2561 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7683 rows 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2567 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7701 rows 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2576 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7728 rows 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2584 2024-11-27T13:25:25,943 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7752 rows 2024-11-27T13:25:25,943 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:25:25,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5fe71801 to 127.0.0.1:59011 2024-11-27T13:25:25,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:25,946 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:25:25,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:25:25,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:25,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:25,952 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713925951"}]},"ts":"1732713925951"} 2024-11-27T13:25:25,953 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:25:25,955 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:25:25,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:25:25,957 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, UNASSIGN}] 2024-11-27T13:25:25,957 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, UNASSIGN 2024-11-27T13:25:25,958 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:25,959 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:25:25,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; CloseRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:26,110 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:26,111 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(124): Close d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:26,111 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:25:26,111 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1681): Closing d2d1919593dfb083385f344db3904c47, disabling compactions & flushes 2024-11-27T13:25:26,111 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:26,286 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/250595460eb84eb6bfc0428d29e97b61 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/250595460eb84eb6bfc0428d29e97b61 2024-11-27T13:25:26,291 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2d1919593dfb083385f344db3904c47/B of d2d1919593dfb083385f344db3904c47 into 250595460eb84eb6bfc0428d29e97b61(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
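The entries above end with the AcidGuaranteesTestTool summary, the completed FLUSH procedure (procId 124), and the client asking the master to disable TestAcidGuarantees; the master stores DisableTableProcedure pid=126 and fans it out into CloseTableRegionsProcedure (pid=127), a TransitRegionStateProcedure UNASSIGN (pid=128) and a CloseRegionProcedure (pid=129). A minimal sketch of the client-side Admin calls that would drive this flush/disable/delete sequence, assuming a standard Connection built from the hbase-site.xml on the classpath (illustrative only; the test itself goes through AcidGuaranteesTestTool and HBaseAdmin futures, not this code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.flush(table);        // synchronous flush; appears as the FLUSH procedure (procId 124) above
                admin.disableTable(table); // DisableTableProcedure (pid 126): each region is unassigned and closed
                admin.deleteTable(table);  // DeleteTableProcedure (pid 130, stored at the end of this excerpt)
            }
        }
    }

The repeated "Checking to see if procedure is done pid=126" entries are the client polling the master for completion of the disable procedure.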
2024-11-27T13:25:26,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:26,291 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/B, priority=12, startTime=1732713925867; duration=0sec 2024-11-27T13:25:26,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:26,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:B 2024-11-27T13:25:26,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. because compaction request was cancelled 2024-11-27T13:25:26,291 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:C 2024-11-27T13:25:26,308 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2d1919593dfb083385f344db3904c47#A#compaction#383 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:26,309 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/2f72a626b00d4145af99809cf6f83d6c is 175, key is test_row_0/A:col10/1732713924214/Put/seqid=0 2024-11-27T13:25:26,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742282_1458 (size=31971) 2024-11-27T13:25:26,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:26,716 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/2f72a626b00d4145af99809cf6f83d6c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/2f72a626b00d4145af99809cf6f83d6c 2024-11-27T13:25:26,720 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2d1919593dfb083385f344db3904c47/A of d2d1919593dfb083385f344db3904c47 into 2f72a626b00d4145af99809cf6f83d6c(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:26,720 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:26,720 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47., storeName=d2d1919593dfb083385f344db3904c47/A, priority=12, startTime=1732713925867; duration=0sec 2024-11-27T13:25:26,720 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:26,720 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:26,720 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:26,720 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. after waiting 0 ms 2024-11-27T13:25:26,720 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2d1919593dfb083385f344db3904c47:A 2024-11-27T13:25:26,720 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 
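In the flush-on-close that follows, family A is written through DefaultMobStoreFlusher into the mobdir path, while B and C use the plain DefaultStoreFlusher; in other words, A is a MOB-enabled column family and B and C are ordinary ones. A rough sketch of how a table with that shape could be declared through the public API (illustrative only; the MOB threshold is a hypothetical placeholder, and the test's actual setup code is not part of this log):

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MobTableSketch {
        // Creates a table with a MOB-enabled family A and ordinary families B and C.
        static void createAcidTable(Admin admin) throws IOException {
            admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)   // large cells are written under mobdir, as the A-family entries show
                    .setMobThreshold(4L)   // hypothetical threshold, chosen only for illustration
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                .build());
        }
    }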
2024-11-27T13:25:26,720 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(2837): Flushing d2d1919593dfb083385f344db3904c47 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=A 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=B 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2d1919593dfb083385f344db3904c47, store=C 2024-11-27T13:25:26,721 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:26,726 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e36366df3fb84dda8d319104f1aa6bdb_d2d1919593dfb083385f344db3904c47 is 50, key is test_row_1/A:col10/1732713924561/Put/seqid=0 2024-11-27T13:25:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742283_1459 (size=9914) 2024-11-27T13:25:27,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:27,134 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:27,137 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e36366df3fb84dda8d319104f1aa6bdb_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e36366df3fb84dda8d319104f1aa6bdb_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:27,138 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/908babc75bfc450c82800f207d6bf954, store: [table=TestAcidGuarantees family=A region=d2d1919593dfb083385f344db3904c47] 2024-11-27T13:25:27,139 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/908babc75bfc450c82800f207d6bf954 is 175, key is test_row_1/A:col10/1732713924561/Put/seqid=0 2024-11-27T13:25:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742284_1460 (size=22561) 2024-11-27T13:25:27,543 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=320, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/908babc75bfc450c82800f207d6bf954 2024-11-27T13:25:27,549 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f87b85b32894f81a842fe621c15cfc8 is 50, key is test_row_1/B:col10/1732713924561/Put/seqid=0 2024-11-27T13:25:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742285_1461 (size=9857) 2024-11-27T13:25:27,953 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f87b85b32894f81a842fe621c15cfc8 2024-11-27T13:25:27,958 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ebf206aacf3d4312a23368a00061f1ec is 50, key is test_row_1/C:col10/1732713924561/Put/seqid=0 2024-11-27T13:25:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742286_1462 (size=9857) 2024-11-27T13:25:28,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:28,362 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ebf206aacf3d4312a23368a00061f1ec 2024-11-27T13:25:28,366 DEBUG 
[RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/A/908babc75bfc450c82800f207d6bf954 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/908babc75bfc450c82800f207d6bf954 2024-11-27T13:25:28,369 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/908babc75bfc450c82800f207d6bf954, entries=100, sequenceid=320, filesize=22.0 K 2024-11-27T13:25:28,369 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/B/7f87b85b32894f81a842fe621c15cfc8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f87b85b32894f81a842fe621c15cfc8 2024-11-27T13:25:28,372 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f87b85b32894f81a842fe621c15cfc8, entries=100, sequenceid=320, filesize=9.6 K 2024-11-27T13:25:28,372 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/.tmp/C/ebf206aacf3d4312a23368a00061f1ec as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ebf206aacf3d4312a23368a00061f1ec 2024-11-27T13:25:28,375 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ebf206aacf3d4312a23368a00061f1ec, entries=100, sequenceid=320, filesize=9.6 K 2024-11-27T13:25:28,376 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for d2d1919593dfb083385f344db3904c47 in 1656ms, sequenceid=320, compaction requested=true 2024-11-27T13:25:28,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6] to archive 2024-11-27T13:25:28,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:28,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/92c1efaa6d4047a7af6bec0dced43e93 2024-11-27T13:25:28,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/7ae59c62a50546bb9d5a9da48589bc30 2024-11-27T13:25:28,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/f92b5c210ad74832b562b9e5cabcf7e1 2024-11-27T13:25:28,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/815935d1ed1e413da7c07d93f60a6014 2024-11-27T13:25:28,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/9d498f428bf24e459864ca19a2f6b651 2024-11-27T13:25:28,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d52ab788c3fe431290fcbc5d15326c22 2024-11-27T13:25:28,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4ac8eb5c568b430e8a5083f11005f5ee 2024-11-27T13:25:28,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/e786545f20044ec491909e28efc13277 2024-11-27T13:25:28,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/5ce4c95c170d454c86e869225db89926 2024-11-27T13:25:28,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/79b497ed746d4374819d5526dbf4221a 2024-11-27T13:25:28,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8f17a15d6d14485a9f3e2792975348e6 2024-11-27T13:25:28,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/38ab670fce00444d97230efd0f0dd3cf 2024-11-27T13:25:28,388 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/c4b09f717253491ebed375ec0d596dc9 2024-11-27T13:25:28,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/1dba2de7c5e5441b94e1dfcd65152b2b 2024-11-27T13:25:28,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/3dda76fe1f2647b1bc006391bb411577 2024-11-27T13:25:28,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/0c195244f74a4e7d8f3d1f0b718a3588 2024-11-27T13:25:28,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/17a109612e0745f28c5da7ba8525ec47 2024-11-27T13:25:28,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/8ba3ffa2844f41999f95f5a83e3f9962 2024-11-27T13:25:28,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/d103b23f86ad40818a0eae8911743a81 2024-11-27T13:25:28,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/4874f316bf0a4e1eadbea58a77816416 2024-11-27T13:25:28,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/83335062426b43cca68d9353de1e59e6 2024-11-27T13:25:28,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/d5b5dd96e0a3440f921768e3f661426e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/8ef8bb6c390a46bca6f326f92c8a89f7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fd74b26c40fc4d5c8486c578bcf3aafb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/699df3c3326444d181c062d59684de89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/180dc9f540404e8ab039e5cf4451ccdc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0] to archive 2024-11-27T13:25:28,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
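Each HFileArchiver entry above and below moves a compacted store file from the table's data directory to the parallel location under archive/, keeping the namespace/table/region/family layout intact. A small sketch of that path mapping as it appears in these entries (it mirrors the visible before/after pattern only, not the HFileArchiver implementation):

    public final class ArchivePathSketch {
        // Maps <root>/data/<ns>/<table>/<region>/<family>/<file>
        // to   <root>/archive/data/<ns>/<table>/<region>/<family>/<file>,
        // the pair of paths shown in each "Archived from FileableStoreFile" line.
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("expected a store file under " + dataPrefix);
            }
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }
    }

For example, the B-family file 5871b905b034438d92815de796e00350 under .../data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/ is recorded above as archived to the matching .../archive/data/default/... path.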
2024-11-27T13:25:28,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5871b905b034438d92815de796e00350 2024-11-27T13:25:28,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bc2294033fb24aaaa66ca076fa7b4803 2024-11-27T13:25:28,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/d5b5dd96e0a3440f921768e3f661426e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/d5b5dd96e0a3440f921768e3f661426e 2024-11-27T13:25:28,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/088c76ff683b480da9f2a73b767cf0e4 2024-11-27T13:25:28,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/5a1743451dda4ce29a9010b36d3db648 2024-11-27T13:25:28,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/e5e23358592c471aaeb64d2f8d2a0ec9 2024-11-27T13:25:28,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/cde24755cc2345dbb626fe177dc26404 2024-11-27T13:25:28,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/8ef8bb6c390a46bca6f326f92c8a89f7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/8ef8bb6c390a46bca6f326f92c8a89f7 2024-11-27T13:25:28,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1c6b04c4ec1d480791d3358157649c79 2024-11-27T13:25:28,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/bd109ec9715643d6b384148999557813 2024-11-27T13:25:28,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fd74b26c40fc4d5c8486c578bcf3aafb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fd74b26c40fc4d5c8486c578bcf3aafb 2024-11-27T13:25:28,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/fc8f56468bf54a0ab79ff258e08d613a 2024-11-27T13:25:28,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/6df6679efb16425992a26288688ec46c 2024-11-27T13:25:28,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/699df3c3326444d181c062d59684de89 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/699df3c3326444d181c062d59684de89 2024-11-27T13:25:28,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/1e92618652f34c84962d4175e6d47a12 2024-11-27T13:25:28,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/0ed470eab469467f9f60f15b5d6f3ed0 2024-11-27T13:25:28,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/180dc9f540404e8ab039e5cf4451ccdc to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/180dc9f540404e8ab039e5cf4451ccdc 2024-11-27T13:25:28,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/2a20f2457d784c05ac3e06dfbef31870 2024-11-27T13:25:28,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/c2e74cb034bf4b78bddece45cdbfcb2b 2024-11-27T13:25:28,412 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f895c6a5f5340a9960702ac7faaa0fb 2024-11-27T13:25:28,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/94fa04f662e54e4c92e4cce52f0ccdf0 2024-11-27T13:25:28,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/75ded68dce084ff19cfc729d2e60182c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/130e9c5a9a1046d7a9395463bda5e7cf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/fac9349ca4f24cd6a710b53141b25792, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/b512182290b74f3c9946249b4ea141f8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca] to archive 2024-11-27T13:25:28,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:28,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3bfab9f7477c4cd58d862fb043e8f762 2024-11-27T13:25:28,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab36e8035e27447e81225973a3d8ffb9 2024-11-27T13:25:28,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/75ded68dce084ff19cfc729d2e60182c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/75ded68dce084ff19cfc729d2e60182c 2024-11-27T13:25:28,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/544361ffc48547c3b5a91353a9225cb7 2024-11-27T13:25:28,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/f356c918867c4d7abb654b021e15e1f2 2024-11-27T13:25:28,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/3454d920acd14b8aab290d5834560fa0 2024-11-27T13:25:28,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/90cde851e5674f9e9eee3a479b4af5e1 2024-11-27T13:25:28,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/130e9c5a9a1046d7a9395463bda5e7cf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/130e9c5a9a1046d7a9395463bda5e7cf 2024-11-27T13:25:28,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/52985386b74241e987cc7033f4d5419e 2024-11-27T13:25:28,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/709f2f5efdbd4745aaa2c56e87c3a6fd 2024-11-27T13:25:28,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/fac9349ca4f24cd6a710b53141b25792 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/fac9349ca4f24cd6a710b53141b25792 2024-11-27T13:25:28,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/1d7530c74fa94342bcd8512bc5dd6498 2024-11-27T13:25:28,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/2dd066040eea47f2ae5ee1e533bc4c39 2024-11-27T13:25:28,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/b512182290b74f3c9946249b4ea141f8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/b512182290b74f3c9946249b4ea141f8 2024-11-27T13:25:28,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/77edf5c773eb4ebc9376de71e475f8df 2024-11-27T13:25:28,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/27b60a998bbe467abfa627b08f075841 2024-11-27T13:25:28,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8f09c605e63d43daa5651c9fc82400ca 2024-11-27T13:25:28,431 DEBUG 
[RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits/323.seqid, newMaxSeqId=323, maxSeqId=4 2024-11-27T13:25:28,432 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47. 2024-11-27T13:25:28,432 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1635): Region close journal for d2d1919593dfb083385f344db3904c47: 2024-11-27T13:25:28,433 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(170): Closed d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:28,433 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=d2d1919593dfb083385f344db3904c47, regionState=CLOSED 2024-11-27T13:25:28,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-27T13:25:28,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; CloseRegionProcedure d2d1919593dfb083385f344db3904c47, server=a0541979a851,32819,1732713812705 in 2.4750 sec 2024-11-27T13:25:28,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-27T13:25:28,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2d1919593dfb083385f344db3904c47, UNASSIGN in 2.4780 sec 2024-11-27T13:25:28,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-27T13:25:28,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.4800 sec 2024-11-27T13:25:28,438 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713928438"}]},"ts":"1732713928438"} 2024-11-27T13:25:28,439 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:25:28,441 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:25:28,442 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.4950 sec 2024-11-27T13:25:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-27T13:25:30,056 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-27T13:25:30,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:25:30,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,058 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T13:25:30,059 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,061 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,063 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits] 2024-11-27T13:25:30,065 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/2f72a626b00d4145af99809cf6f83d6c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/2f72a626b00d4145af99809cf6f83d6c 2024-11-27T13:25:30,066 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/908babc75bfc450c82800f207d6bf954 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/A/908babc75bfc450c82800f207d6bf954 2024-11-27T13:25:30,068 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/250595460eb84eb6bfc0428d29e97b61 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/250595460eb84eb6bfc0428d29e97b61 2024-11-27T13:25:30,069 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f87b85b32894f81a842fe621c15cfc8 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/B/7f87b85b32894f81a842fe621c15cfc8 2024-11-27T13:25:30,071 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/0682a4a21b984cb69b1b46e679cb8edf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/0682a4a21b984cb69b1b46e679cb8edf 2024-11-27T13:25:30,071 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/48744211a5be49f5924f353c303ee657 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/48744211a5be49f5924f353c303ee657 2024-11-27T13:25:30,072 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8833b156d5d54f59ba5ea1ef316e9fb8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/8833b156d5d54f59ba5ea1ef316e9fb8 2024-11-27T13:25:30,073 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab5de967578241fa9a01e654a60b3946 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ab5de967578241fa9a01e654a60b3946 2024-11-27T13:25:30,074 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ebf206aacf3d4312a23368a00061f1ec to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/C/ebf206aacf3d4312a23368a00061f1ec 2024-11-27T13:25:30,075 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits/323.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47/recovered.edits/323.seqid 2024-11-27T13:25:30,076 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,076 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:25:30,076 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:25:30,077 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T13:25:30,079 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270746754ccaa444ea94a0d50821e29fba_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270746754ccaa444ea94a0d50821e29fba_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,080 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270aac7ce92d634bb0b82d7a82dcbc64ea_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411270aac7ce92d634bb0b82d7a82dcbc64ea_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,081 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112725e289bb3e984ad182a2f67c33b81657_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112725e289bb3e984ad182a2f67c33b81657_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,082 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742b397f62a3d4657a117a91f55bab5af_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112742b397f62a3d4657a117a91f55bab5af_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,083 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275ab0c09798b64efabab7a54ea1616706_d2d1919593dfb083385f344db3904c47 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275ab0c09798b64efabab7a54ea1616706_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,084 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7d82041cb440e6891cb8d1a580967c_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275d7d82041cb440e6891cb8d1a580967c_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,085 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278136191c39344bb49835e29d30d3f0fb_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278136191c39344bb49835e29d30d3f0fb_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,086 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112786d4545c657d43f6940bf5bb18bc7803_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112786d4545c657d43f6940bf5bb18bc7803_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,087 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112793b2fae87be44233bf38e5045acb3e10_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112793b2fae87be44233bf38e5045acb3e10_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,087 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a46fe1a0c3f44ead810f873da06cdf7d_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127a46fe1a0c3f44ead810f873da06cdf7d_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,088 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127acb24db64d7e46b1bcfe62c05850bb5f_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127acb24db64d7e46b1bcfe62c05850bb5f_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,089 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c5effe95334642c5ac4347559810bb96_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c5effe95334642c5ac4347559810bb96_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,090 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c9d5be268bb3431990afade12ed9adac_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c9d5be268bb3431990afade12ed9adac_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,091 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e00d6d9e1bb240028121347d9bdc03db_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e00d6d9e1bb240028121347d9bdc03db_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,092 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e36366df3fb84dda8d319104f1aa6bdb_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e36366df3fb84dda8d319104f1aa6bdb_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,093 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fba9f23380174288bd34cffaf94c0728_d2d1919593dfb083385f344db3904c47 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127fba9f23380174288bd34cffaf94c0728_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,093 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ff6974fac2a24c339f1f691903e24b58_d2d1919593dfb083385f344db3904c47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ff6974fac2a24c339f1f691903e24b58_d2d1919593dfb083385f344db3904c47 2024-11-27T13:25:30,094 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:25:30,096 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,097 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:25:30,099 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T13:25:30,099 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,099 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T13:25:30,099 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713930099"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:30,101 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:25:30,101 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d2d1919593dfb083385f344db3904c47, NAME => 'TestAcidGuarantees,,1732713901111.d2d1919593dfb083385f344db3904c47.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:25:30,101 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-27T13:25:30,101 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713930101"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:30,102 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:25:30,105 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,105 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-11-27T13:25:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-27T13:25:30,160 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-27T13:25:30,171 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=459 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=455 (was 471), ProcessCount=11 (was 11), AvailableMemoryMB=4112 (was 4139) 2024-11-27T13:25:30,180 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=455, ProcessCount=11, AvailableMemoryMB=4111 2024-11-27T13:25:30,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-27T13:25:30,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:25:30,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:30,183 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:25:30,183 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:30,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 131 2024-11-27T13:25:30,184 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:25:30,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:30,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742287_1463 (size=963) 2024-11-27T13:25:30,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:30,591 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:25:30,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742288_1464 (size=53) 2024-11-27T13:25:30,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing da754bda7623eef518328888f8b63cf4, disabling compactions & flushes 2024-11-27T13:25:30,997 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. after waiting 0 ms 2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:30,997 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:30,997 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:30,998 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:25:30,998 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713930998"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713930998"}]},"ts":"1732713930998"} 2024-11-27T13:25:30,999 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:25:30,999 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:25:31,000 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713930999"}]},"ts":"1732713930999"} 2024-11-27T13:25:31,000 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:25:31,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, ASSIGN}] 2024-11-27T13:25:31,006 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, ASSIGN 2024-11-27T13:25:31,006 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:25:31,052 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-27T13:25:31,157 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=da754bda7623eef518328888f8b63cf4, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:31,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; OpenRegionProcedure da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:31,309 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:31,312 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:31,312 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7285): Opening region: {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:25:31,312 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,312 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:31,313 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7327): checking encryption for da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,313 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(7330): checking classloading for da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,314 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,315 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:31,315 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da754bda7623eef518328888f8b63cf4 columnFamilyName A 2024-11-27T13:25:31,315 DEBUG [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:31,316 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(327): Store=da754bda7623eef518328888f8b63cf4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:31,316 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,317 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:31,317 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da754bda7623eef518328888f8b63cf4 columnFamilyName B 2024-11-27T13:25:31,317 DEBUG [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:31,317 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(327): Store=da754bda7623eef518328888f8b63cf4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:31,317 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,318 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:31,318 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da754bda7623eef518328888f8b63cf4 columnFamilyName C 2024-11-27T13:25:31,318 DEBUG [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:31,319 INFO [StoreOpener-da754bda7623eef518328888f8b63cf4-1 {}] regionserver.HStore(327): Store=da754bda7623eef518328888f8b63cf4/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:31,319 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:31,319 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,320 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,321 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-27T13:25:31,322 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1085): writing seq id for da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:31,323 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:25:31,323 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1102): Opened da754bda7623eef518328888f8b63cf4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62201423, jitterRate=-0.07312656939029694}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:25:31,324 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegion(1001): Region open journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:31,324 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., pid=133, masterSystemTime=1732713931309 2024-11-27T13:25:31,326 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:31,326 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=133}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:31,326 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=da754bda7623eef518328888f8b63cf4, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:31,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-27T13:25:31,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; OpenRegionProcedure da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 in 169 msec 2024-11-27T13:25:31,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-11-27T13:25:31,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, ASSIGN in 323 msec 2024-11-27T13:25:31,329 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:25:31,329 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713931329"}]},"ts":"1732713931329"} 2024-11-27T13:25:31,330 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:25:31,332 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:25:31,333 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-11-27T13:25:32,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=131 2024-11-27T13:25:32,288 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 131 completed 2024-11-27T13:25:32,290 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-27T13:25:32,293 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,294 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,295 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,296 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:25:32,297 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56288, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:25:32,298 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-27T13:25:32,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,302 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-27T13:25:32,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,305 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-27T13:25:32,307 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,308 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-27T13:25:32,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-27T13:25:32,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-27T13:25:32,316 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-27T13:25:32,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-27T13:25:32,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,325 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-27T13:25:32,330 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,330 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-27T13:25:32,333 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:32,336 DEBUG [hconnection-0x3b17e29f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,338 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45754, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:32,339 DEBUG [hconnection-0x1eb24088-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,339 DEBUG [hconnection-0x41391b5d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-27T13:25:32,340 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,340 DEBUG [hconnection-0x63a2d58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,340 DEBUG [hconnection-0x34f815bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,341 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,341 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:32,341 DEBUG [hconnection-0x147a5b94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:32,342 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:32,342 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,342 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:32,342 DEBUG [hconnection-0x1272b767-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,342 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,343 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,344 DEBUG [hconnection-0x6554444-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,345 DEBUG [hconnection-0x5186030b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,346 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,346 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,347 DEBUG [hconnection-0x6cb11359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:32,353 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45828, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:32,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:32,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713992371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713992371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713992372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713992373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713992373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8f3b11670edb4fb496c81c37ef8cf553 is 50, key is test_row_0/A:col10/1732713932351/Put/seqid=0 2024-11-27T13:25:32,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742289_1465 (size=12001) 2024-11-27T13:25:32,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8f3b11670edb4fb496c81c37ef8cf553 2024-11-27T13:25:32,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/24660acf42de478f80cfb44b549bcefd is 50, key is test_row_0/B:col10/1732713932351/Put/seqid=0 2024-11-27T13:25:32,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742290_1466 (size=12001) 2024-11-27T13:25:32,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/24660acf42de478f80cfb44b549bcefd 
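[Editor's note] The bursts of RegionTooBusyException WARNs above (and the further ones below) are the region server refusing writes to da754bda7623eef518328888f8b63cf4 because its memstore has grown past the blocking limit (512.0 K) while the flush is still running. The test's writer threads simply keep retrying until the flush frees space. The snippet below is a minimal, hand-rolled sketch of that back-off pattern, not the actual TestAcidGuarantees writer: the table, row, family and qualifier names are taken from the log, everything else (retry count, back-off values) is assumed, and in practice the stock HBase client already retries these rejections internally before anything surfaces to application code.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Row/column names mirror the flush output in the log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // hypothetical starting back-off
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put); // may fail while the region is over its memstore limit
          break;          // write accepted
        } catch (IOException busy) {
          // The client's built-in retries run first; whatever finally surfaces here
          // (typically with RegionTooBusyException as the cause) gets a longer pause.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
```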
2024-11-27T13:25:32,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:32,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/5d1e2e7795994fc5aaf0f7eb88583687 is 50, key is test_row_0/C:col10/1732713932351/Put/seqid=0 2024-11-27T13:25:32,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713992474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713992474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713992474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713992476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742291_1467 (size=12001) 2024-11-27T13:25:32,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713992476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/5d1e2e7795994fc5aaf0f7eb88583687 2024-11-27T13:25:32,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8f3b11670edb4fb496c81c37ef8cf553 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553 2024-11-27T13:25:32,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553, entries=150, sequenceid=14, filesize=11.7 K 2024-11-27T13:25:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/24660acf42de478f80cfb44b549bcefd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd 2024-11-27T13:25:32,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd, entries=150, sequenceid=14, filesize=11.7 K 2024-11-27T13:25:32,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/5d1e2e7795994fc5aaf0f7eb88583687 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687 2024-11-27T13:25:32,494 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 
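[Editor's note] Just above, the flusher commits the three temporary files out of .tmp/ into the region's A, B and C column-family directories (entries=150, sequenceid=14, filesize=11.7 K each). If you wanted to confirm from the test's mini-cluster HDFS that those HFiles landed where the log says, a small listing like the following would do it. The filesystem URI and region path are copied verbatim from the log lines above; the code itself is an assumed, illustrative check, not something the test runs.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRegionStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42217"); // NameNode address from the log

    // Region directory taken from the flush/commit messages above.
    Path regionDir = new Path("/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/"
        + "data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4");

    try (FileSystem fs = FileSystem.get(conf)) {
      for (String family : new String[] { "A", "B", "C" }) {
        // Each committed flush output appears as one HFile per column family.
        for (FileStatus hfile : fs.listStatus(new Path(regionDir, family))) {
          System.out.println(hfile.getPath() + " " + hfile.getLen() + " bytes");
        }
      }
    }
  }
}
```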
2024-11-27T13:25:32,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-27T13:25:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:32,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:32,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
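[Editor's note] The pid=135 failure just logged is benign: the FlushRegionCallable found the region "already flushing" (the memstore-pressure flush started at 13:25:32,353), so it reports an IOException back to the master, which re-dispatches the procedure a moment later in the log (13:25:32,647). The whole exchange, pid=134 FlushTableProcedure, its pid=135 subprocedure, and the repeated "Checking to see if procedure is done pid=134" polls, was started by the client-side flush request recorded earlier ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of how such a flush is requested through the Admin API, written in a generic test-harness style rather than copied from TestAcidGuarantees:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; in this log that request shows up as
      // FlushTableProcedure pid=134, and the client keeps polling the master for
      // completion until the procedure (and its per-region subprocedures) finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```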
2024-11-27T13:25:32,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687, entries=150, sequenceid=14, filesize=11.7 K 2024-11-27T13:25:32,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
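[Editor's note] Every write rejection in this excerpt quotes the same figure, "Over memstore limit=512.0 K". That limit is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; updates are refused once the memstore exceeds it, and resume after the flush completes. The production default flush size is 128 MB, so the 512 KB limit here implies the test deliberately shrinks it to hit this code path quickly. The sketch below uses made-up values that would yield a 512 KB limit; the real TestAcidGuarantees settings are not visible in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
    // matching the "Over memstore limit=512.0 K" in the RegionTooBusyException messages.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingBytes = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore size: " + blockingBytes + " bytes"); // 524288
  }
}
```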
2024-11-27T13:25:32,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for da754bda7623eef518328888f8b63cf4 in 143ms, sequenceid=14, compaction requested=false 2024-11-27T13:25:32,497 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-27T13:25:32,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:32,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:32,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:32,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-27T13:25:32,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:32,648 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:32,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:32,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/d121b91678ed433084a5d6e39a7c8367 is 50, key is test_row_0/A:col10/1732713932371/Put/seqid=0 2024-11-27T13:25:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742292_1468 (size=12001) 2024-11-27T13:25:32,681 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:32,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:32,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713992687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713992688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713992694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713992695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713992695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713992796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713992796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713992796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713992803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:32,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713992803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:32,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:33,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713993000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713993000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713993000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713993008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713993009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,078 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/d121b91678ed433084a5d6e39a7c8367 2024-11-27T13:25:33,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f4e9a2c60fcb4e539ef7a4873645e2aa is 50, key is test_row_0/B:col10/1732713932371/Put/seqid=0 2024-11-27T13:25:33,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742293_1469 (size=12001) 2024-11-27T13:25:33,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713993304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713993304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713993304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713993310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713993314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:33,493 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f4e9a2c60fcb4e539ef7a4873645e2aa 2024-11-27T13:25:33,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/431712205d90403dbe501e2cf5fc2b97 is 50, key is test_row_0/C:col10/1732713932371/Put/seqid=0 2024-11-27T13:25:33,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742294_1470 (size=12001) 2024-11-27T13:25:33,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713993809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713993812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713993812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713993816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713993819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:33,913 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/431712205d90403dbe501e2cf5fc2b97 2024-11-27T13:25:33,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/d121b91678ed433084a5d6e39a7c8367 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367 2024-11-27T13:25:33,924 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367, entries=150, sequenceid=39, filesize=11.7 K 2024-11-27T13:25:33,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f4e9a2c60fcb4e539ef7a4873645e2aa as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa 2024-11-27T13:25:33,931 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa, entries=150, sequenceid=39, filesize=11.7 K 2024-11-27T13:25:33,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/431712205d90403dbe501e2cf5fc2b97 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97 2024-11-27T13:25:33,935 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97, entries=150, sequenceid=39, filesize=11.7 K 2024-11-27T13:25:33,936 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for da754bda7623eef518328888f8b63cf4 in 1288ms, sequenceid=39, compaction requested=false 2024-11-27T13:25:33,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:33,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:33,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-27T13:25:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-27T13:25:33,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-27T13:25:33,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5960 sec 2024-11-27T13:25:33,941 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6010 sec 2024-11-27T13:25:33,954 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:25:34,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-27T13:25:34,446 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-27T13:25:34,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:34,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-27T13:25:34,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T13:25:34,449 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:34,450 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:34,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:34,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T13:25:34,602 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:34,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-27T13:25:34,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:34,603 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:34,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:34,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7e784ff900d6455bb098f861d8d13c09 is 50, key is 
test_row_0/A:col10/1732713932693/Put/seqid=0 2024-11-27T13:25:34,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742295_1471 (size=12001) 2024-11-27T13:25:34,616 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7e784ff900d6455bb098f861d8d13c09 2024-11-27T13:25:34,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/b09f629385b449588e7fa6b2ae9ee3c8 is 50, key is test_row_0/B:col10/1732713932693/Put/seqid=0 2024-11-27T13:25:34,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742296_1472 (size=12001) 2024-11-27T13:25:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T13:25:34,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:34,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:34,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713994848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713994854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713994855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713994859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713994859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713994959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713994963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713994965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713994966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:34,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713994967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,028 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/b09f629385b449588e7fa6b2ae9ee3c8 2024-11-27T13:25:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/7647a390bdb64047b169f346e0ab2c2c is 50, key is test_row_0/C:col10/1732713932693/Put/seqid=0 2024-11-27T13:25:35,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742297_1473 (size=12001) 2024-11-27T13:25:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T13:25:35,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713995166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713995168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713995173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713995173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713995173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,443 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/7647a390bdb64047b169f346e0ab2c2c 2024-11-27T13:25:35,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7e784ff900d6455bb098f861d8d13c09 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09 2024-11-27T13:25:35,450 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09, entries=150, sequenceid=51, filesize=11.7 K 2024-11-27T13:25:35,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/b09f629385b449588e7fa6b2ae9ee3c8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8 2024-11-27T13:25:35,455 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8, entries=150, sequenceid=51, filesize=11.7 K 2024-11-27T13:25:35,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/7647a390bdb64047b169f346e0ab2c2c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c 2024-11-27T13:25:35,459 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c, entries=150, sequenceid=51, filesize=11.7 K 2024-11-27T13:25:35,459 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 856ms, sequenceid=51, compaction requested=true 2024-11-27T13:25:35,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:35,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:35,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-27T13:25:35,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-27T13:25:35,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-27T13:25:35,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-11-27T13:25:35,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.0150 sec 2024-11-27T13:25:35,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:35,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:25:35,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:35,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/94cf7d36a3c6434bb475df12b6c74982 is 50, key is test_row_0/A:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:35,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742298_1474 (size=14341) 2024-11-27T13:25:35,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713995488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713995491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713995492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713995494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713995495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-27T13:25:35,552 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-27T13:25:35,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-27T13:25:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:35,554 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:35,555 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:35,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:35,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713995595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713995596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713995598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713995598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713995601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:35,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:35,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:35,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:35,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:35,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:35,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:35,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:35,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713995803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713995803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713995804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713995804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713995807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:35,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:35,861 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:35,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:35,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:35,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/94cf7d36a3c6434bb475df12b6c74982 2024-11-27T13:25:35,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/18c5ac84bdb14df9982e36fd8cb7cf91 is 50, key is test_row_0/B:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:35,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742299_1475 (size=12001) 2024-11-27T13:25:36,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:36,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:36,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:36,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
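The repeated pid=139 failures are a master-driven flush procedure being re-dispatched to the region server: because the region is already mid-flush, FlushRegionCallable throws "Unable to complete flush" and the master retries until the in-progress flush completes. As a hedged sketch of how such a flush can be requested (the table name is taken from the log; the test's actual trigger may differ):

```java
// Illustrative only: one way to request a flush that ends up dispatched as a
// remote flush procedure like pid=139 above. If the region is already flushing,
// the callable fails and the master re-dispatches it, matching the repeated errors.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```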
2024-11-27T13:25:36,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713996110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713996110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713996110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713996111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713996111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:36,165 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:36,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:36,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/18c5ac84bdb14df9982e36fd8cb7cf91 2024-11-27T13:25:36,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/21c01a72f4db4985ab28f1c5bdfce47b is 50, key is test_row_0/C:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:36,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742300_1476 (size=12001) 2024-11-27T13:25:36,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/21c01a72f4db4985ab28f1c5bdfce47b 2024-11-27T13:25:36,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:36,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/94cf7d36a3c6434bb475df12b6c74982 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982 2024-11-27T13:25:36,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:36,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982, entries=200, sequenceid=76, filesize=14.0 K 2024-11-27T13:25:36,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/18c5ac84bdb14df9982e36fd8cb7cf91 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91 2024-11-27T13:25:36,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91, entries=150, sequenceid=76, filesize=11.7 K 2024-11-27T13:25:36,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/21c01a72f4db4985ab28f1c5bdfce47b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b 2024-11-27T13:25:36,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b, entries=150, sequenceid=76, filesize=11.7 K 
2024-11-27T13:25:36,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for da754bda7623eef518328888f8b63cf4 in 856ms, sequenceid=76, compaction requested=true 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:36,333 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:36,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:36,333 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:36,337 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:36,337 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:36,337 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:36,337 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=49.2 K 2024-11-27T13:25:36,337 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f3b11670edb4fb496c81c37ef8cf553, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732713932351 2024-11-27T13:25:36,338 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d121b91678ed433084a5d6e39a7c8367, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732713932369 2024-11-27T13:25:36,338 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e784ff900d6455bb098f861d8d13c09, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732713932687 2024-11-27T13:25:36,338 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94cf7d36a3c6434bb475df12b6c74982, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934839 2024-11-27T13:25:36,341 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:36,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:36,342 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
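The "Exploring compaction algorithm has selected 4 files of size 50344" lines show ExploringCompactionPolicy accepting a minor-compaction window: a candidate set qualifies only if every file is no larger than the sum of the other files times the compaction ratio. Below is a simplified sketch of that ratio test under stated assumptions (default ratio 1.2, byte sizes approximated from the store files listed above); it is not the actual HBase implementation, which also weighs file counts and total size.

```java
// Simplified sketch of the ratio check used when exploring candidate windows.
import java.util.List;

public class RatioCheck {
  // hbase.hstore.compaction.ratio defaults to 1.2.
  static boolean fitsRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // Each file must be no bigger than ratio * (sum of the other files).
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the four A-store files above (three ~11.7 K, one ~14.0 K),
    // chosen so they add up to the 50344 bytes reported in the log.
    System.out.println(fitsRatio(List.of(12001L, 12001L, 12001L, 14341L), 1.2)); // true
  }
}
```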
2024-11-27T13:25:36,342 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=46.9 K 2024-11-27T13:25:36,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 24660acf42de478f80cfb44b549bcefd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732713932351 2024-11-27T13:25:36,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f4e9a2c60fcb4e539ef7a4873645e2aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732713932369 2024-11-27T13:25:36,343 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b09f629385b449588e7fa6b2ae9ee3c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732713932687 2024-11-27T13:25:36,343 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 18c5ac84bdb14df9982e36fd8cb7cf91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934846 2024-11-27T13:25:36,351 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:36,351 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8a102f8f1b2f49d29aba2559917c9831 is 50, key is test_row_0/A:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:36,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742301_1477 (size=12139) 2024-11-27T13:25:36,357 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#400 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:36,357 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/3b2a3b471ddd4aaf89aaf6aaf268569b is 50, key is test_row_0/B:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:36,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742302_1478 (size=12139) 2024-11-27T13:25:36,366 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8a102f8f1b2f49d29aba2559917c9831 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8a102f8f1b2f49d29aba2559917c9831 2024-11-27T13:25:36,370 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 8a102f8f1b2f49d29aba2559917c9831(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:36,370 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:36,370 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=12, startTime=1732713936333; duration=0sec 2024-11-27T13:25:36,370 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:36,370 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:36,370 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:36,372 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:36,372 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:36,372 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:36,372 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=46.9 K 2024-11-27T13:25:36,372 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d1e2e7795994fc5aaf0f7eb88583687, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732713932351 2024-11-27T13:25:36,373 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 431712205d90403dbe501e2cf5fc2b97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732713932369 2024-11-27T13:25:36,373 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7647a390bdb64047b169f346e0ab2c2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732713932687 2024-11-27T13:25:36,373 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21c01a72f4db4985ab28f1c5bdfce47b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934846 2024-11-27T13:25:36,382 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#401 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:36,383 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/6cc63a98d77a4ed69e4b996cb6fc3df2 is 50, key is test_row_0/C:col10/1732713934846/Put/seqid=0 2024-11-27T13:25:36,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742303_1479 (size=12139) 2024-11-27T13:25:36,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:36,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:36,472 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/98da724df9154064855f5360c09d9392 is 50, key is test_row_0/A:col10/1732713935492/Put/seqid=0 2024-11-27T13:25:36,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742304_1480 (size=12001) 2024-11-27T13:25:36,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
as already flushing 2024-11-27T13:25:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:36,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713996644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713996645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713996646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713996647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713996648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:36,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713996750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713996751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713996752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713996754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713996754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,769 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/3b2a3b471ddd4aaf89aaf6aaf268569b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3b2a3b471ddd4aaf89aaf6aaf268569b 2024-11-27T13:25:36,773 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 3b2a3b471ddd4aaf89aaf6aaf268569b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:36,773 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:36,773 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=12, startTime=1732713936333; duration=0sec 2024-11-27T13:25:36,773 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:36,773 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:36,797 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/6cc63a98d77a4ed69e4b996cb6fc3df2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6cc63a98d77a4ed69e4b996cb6fc3df2 2024-11-27T13:25:36,801 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 6cc63a98d77a4ed69e4b996cb6fc3df2(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:36,801 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:36,801 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=12, startTime=1732713936333; duration=0sec 2024-11-27T13:25:36,801 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:36,801 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:36,887 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/98da724df9154064855f5360c09d9392 2024-11-27T13:25:36,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7610e4c600e2403d8a279df167e91a6a is 50, key is test_row_0/B:col10/1732713935492/Put/seqid=0 2024-11-27T13:25:36,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742305_1481 (size=12001) 2024-11-27T13:25:36,904 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7610e4c600e2403d8a279df167e91a6a 2024-11-27T13:25:36,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/db6c0fe036214538a998262e6ed950d5 is 50, key is test_row_0/C:col10/1732713935492/Put/seqid=0 2024-11-27T13:25:36,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742306_1482 (size=12001) 2024-11-27T13:25:36,930 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/db6c0fe036214538a998262e6ed950d5 2024-11-27T13:25:36,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/98da724df9154064855f5360c09d9392 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392 2024-11-27T13:25:36,937 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T13:25:36,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7610e4c600e2403d8a279df167e91a6a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a 2024-11-27T13:25:36,942 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T13:25:36,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/db6c0fe036214538a998262e6ed950d5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5 2024-11-27T13:25:36,945 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5, entries=150, sequenceid=89, filesize=11.7 K 2024-11-27T13:25:36,946 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 474ms, sequenceid=89, compaction requested=false 2024-11-27T13:25:36,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:36,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:36,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-27T13:25:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-27T13:25:36,951 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-27T13:25:36,951 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3920 sec 2024-11-27T13:25:36,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.3990 sec 2024-11-27T13:25:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:36,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:36,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:36,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/11548d77d8724a95a88f441e2ab2456b is 50, key is test_row_0/A:col10/1732713936647/Put/seqid=0 2024-11-27T13:25:36,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742307_1483 (size=16681) 2024-11-27T13:25:36,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713996968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713996970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713996970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713996971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:36,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713996978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713997079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713997079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713997079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713997079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713997085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713997282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713997284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713997284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713997285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713997289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/11548d77d8724a95a88f441e2ab2456b 2024-11-27T13:25:37,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/ed50d1c6955241b48168545b3f963b70 is 50, key is test_row_0/B:col10/1732713936647/Put/seqid=0 2024-11-27T13:25:37,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742308_1484 (size=12001) 2024-11-27T13:25:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713997590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713997589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713997590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713997590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:37,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713997596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-27T13:25:37,658 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-27T13:25:37,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-27T13:25:37,661 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:37,661 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:37,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:37,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/ed50d1c6955241b48168545b3f963b70 2024-11-27T13:25:37,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/17345be7287b46948c29fa20cd83637b is 50, key is test_row_0/C:col10/1732713936647/Put/seqid=0 
2024-11-27T13:25:37,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742309_1485 (size=12001) 2024-11-27T13:25:37,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/17345be7287b46948c29fa20cd83637b 2024-11-27T13:25:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/11548d77d8724a95a88f441e2ab2456b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b 2024-11-27T13:25:37,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b, entries=250, sequenceid=117, filesize=16.3 K 2024-11-27T13:25:37,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/ed50d1c6955241b48168545b3f963b70 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70 2024-11-27T13:25:37,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T13:25:37,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/17345be7287b46948c29fa20cd83637b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b 2024-11-27T13:25:37,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b, entries=150, sequenceid=117, filesize=11.7 K 2024-11-27T13:25:37,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for da754bda7623eef518328888f8b63cf4 in 848ms, sequenceid=117, compaction requested=true 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:37,808 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:37,808 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:37,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:37,809 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:37,809 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:37,809 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:37,810 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8a102f8f1b2f49d29aba2559917c9831, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=39.9 K 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:37,810 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:37,810 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3b2a3b471ddd4aaf89aaf6aaf268569b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=35.3 K 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a102f8f1b2f49d29aba2559917c9831, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934846 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b2a3b471ddd4aaf89aaf6aaf268569b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934846 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98da724df9154064855f5360c09d9392, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732713935487 2024-11-27T13:25:37,810 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7610e4c600e2403d8a279df167e91a6a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732713935487 2024-11-27T13:25:37,811 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 11548d77d8724a95a88f441e2ab2456b, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:37,811 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ed50d1c6955241b48168545b3f963b70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:37,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:37,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:37,814 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:37,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:37,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/ad6b948c0e56430ca11949098aa584e4 is 50, key is test_row_0/A:col10/1732713936977/Put/seqid=0 2024-11-27T13:25:37,823 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:37,824 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/090cd310ac2a4d41b71987c06364efe8 is 50, key is test_row_0/B:col10/1732713936647/Put/seqid=0 2024-11-27T13:25:37,825 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#410 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:37,826 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/3b8ccb3c9536414599740ef181abfb66 is 50, key is test_row_0/A:col10/1732713936647/Put/seqid=0 2024-11-27T13:25:37,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742311_1487 (size=12241) 2024-11-27T13:25:37,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742312_1488 (size=12241) 2024-11-27T13:25:37,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742310_1486 (size=12001) 2024-11-27T13:25:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:38,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:38,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713998131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713998132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713998133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713998134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713998141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,246 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/090cd310ac2a4d41b71987c06364efe8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/090cd310ac2a4d41b71987c06364efe8 2024-11-27T13:25:38,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713998243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713998243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713998243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713998243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713998247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,251 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 090cd310ac2a4d41b71987c06364efe8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:38,251 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:38,251 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713937808; duration=0sec 2024-11-27T13:25:38,251 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:38,251 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:38,251 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:38,252 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:38,252 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:38,252 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:38,252 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6cc63a98d77a4ed69e4b996cb6fc3df2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=35.3 K 2024-11-27T13:25:38,253 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cc63a98d77a4ed69e4b996cb6fc3df2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732713934846 2024-11-27T13:25:38,253 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting db6c0fe036214538a998262e6ed950d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732713935487 2024-11-27T13:25:38,253 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 17345be7287b46948c29fa20cd83637b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:38,261 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
da754bda7623eef518328888f8b63cf4#C#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:38,261 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/074bbe8281c5440ea1b7031e3c7ebf78 is 50, key is test_row_0/C:col10/1732713936647/Put/seqid=0 2024-11-27T13:25:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:38,266 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/3b8ccb3c9536414599740ef181abfb66 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/3b8ccb3c9536414599740ef181abfb66 2024-11-27T13:25:38,270 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 3b8ccb3c9536414599740ef181abfb66(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:38,270 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:38,270 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713937808; duration=0sec 2024-11-27T13:25:38,271 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:38,271 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:38,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742313_1489 (size=12241) 2024-11-27T13:25:38,279 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/ad6b948c0e56430ca11949098aa584e4 2024-11-27T13:25:38,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/917df64a16e24e619230198607c9a4d4 is 50, key is test_row_0/B:col10/1732713936977/Put/seqid=0 2024-11-27T13:25:38,301 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742314_1490 (size=12001) 2024-11-27T13:25:38,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713998449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713998449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713998450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713998451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713998451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,677 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/074bbe8281c5440ea1b7031e3c7ebf78 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/074bbe8281c5440ea1b7031e3c7ebf78 2024-11-27T13:25:38,681 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 074bbe8281c5440ea1b7031e3c7ebf78(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
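Note on the repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K"): this exception is raised when a region's memstore exceeds its blocking size, which in a stock deployment is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below is illustrative only and assumes the test shrinks those settings; the concrete values (128 KB x 4 = 512 KB) are an assumption chosen to match the logged limit, not taken from the test's source.

```java
// Illustrative sketch only (not from this log or test source): how a harness might
// shrink the per-region memstore limits so that RegionTooBusyException ("Over
// memstore limit") is hit quickly under concurrent writers. The property names are
// standard HBase configuration keys; the values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block updates once the memstore grows past flush.size * multiplier,
    // i.e. 512 KB here, matching the "Over memstore limit=512.0 K" messages.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```

While writes are blocked this way, the region server keeps flushing and compacting in the background, which is consistent with the flush and compaction entries interleaved with the warnings in this log.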
2024-11-27T13:25:38,681 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:38,681 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713937808; duration=0sec 2024-11-27T13:25:38,681 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:38,681 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:38,702 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/917df64a16e24e619230198607c9a4d4 2024-11-27T13:25:38,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f is 50, key is test_row_0/C:col10/1732713936977/Put/seqid=0 2024-11-27T13:25:38,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742315_1491 (size=12001) 2024-11-27T13:25:38,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713998754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713998754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713998755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713998757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713998757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:38,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:39,115 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f 2024-11-27T13:25:39,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/ad6b948c0e56430ca11949098aa584e4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4 2024-11-27T13:25:39,123 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4, entries=150, sequenceid=127, filesize=11.7 K 2024-11-27T13:25:39,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/917df64a16e24e619230198607c9a4d4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4 2024-11-27T13:25:39,127 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4, entries=150, sequenceid=127, filesize=11.7 K 2024-11-27T13:25:39,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f 2024-11-27T13:25:39,131 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f, entries=150, sequenceid=127, filesize=11.7 K 2024-11-27T13:25:39,133 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for da754bda7623eef518328888f8b63cf4 in 1319ms, sequenceid=127, compaction requested=false 2024-11-27T13:25:39,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:39,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
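For reference, the flush above commits one ~11.7 K HFile with 150 entries into each of the A, B and C stores, with keys like test_row_0/A:col10. A client write producing cells of that shape might look like the following sketch; the table name and column layout come from the log, while the connection setup and value bytes are generic assumptions rather than the test's actual writer.

```java
// Illustrative sketch only: a put that yields cells like "test_row_0/A:col10"
// seen in the flushed files above. Uses the standard HBase client API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value");
      // One cell per column family, mirroring the A/B/C stores in the log.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);
    }
  }
}
```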
2024-11-27T13:25:39,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-27T13:25:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-27T13:25:39,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-27T13:25:39,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4730 sec 2024-11-27T13:25:39,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4770 sec 2024-11-27T13:25:39,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:39,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-27T13:25:39,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:39,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:39,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:39,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7781852f00474a0293450dc3669478dd is 50, key is test_row_0/A:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:39,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742316_1492 (size=14541) 2024-11-27T13:25:39,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713999269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713999271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713999271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713999271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713999272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713999378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713999378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713999379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713999379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713999379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713999584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713999585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713999586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713999586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713999586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7781852f00474a0293450dc3669478dd 2024-11-27T13:25:39,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/405aad971a664660ba153fff03c3b8a2 is 50, key is test_row_0/B:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:39,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742317_1493 (size=12151) 2024-11-27T13:25:39,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-27T13:25:39,767 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-27T13:25:39,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:39,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-27T13:25:39,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 
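The entries above show the master accepting a client-requested flush of TestAcidGuarantees (FlushTableProcedure, pid=142) while the client repeatedly asks whether the procedure is done ("Checking to see if procedure is done"). A hedged sketch of how such a flush is typically requested through the public client API follows; it assumes nothing about the test harness beyond the table name.

```java
// Illustrative sketch only: an administrative flush request like the one logged
// above ("Client=jenkins ... flush TestAcidGuarantees"). The call returns once the
// master reports the flush procedure complete, which appears consistent with the
// later "Operation: FLUSH ... completed" entry in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all regions of the table; blocks until the procedure finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```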
2024-11-27T13:25:39,770 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:39,770 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:39,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T13:25:39,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732713999889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732713999891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732713999893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732713999893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:39,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732713999893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:39,922 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:39,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T13:25:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:39,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:39,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T13:25:40,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:40,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T13:25:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
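The entries above capture two sides of the same memstore pressure on region da754bda7623eef518328888f8b63cf4: client Mutate calls are rejected with RegionTooBusyException (Over memstore limit=512.0 K), and the FlushRegionProcedure subprocedure (pid=143) dispatched by FlushTableProcedure pid=142 keeps being re-sent and rejected because the region is already flushing. The following is only a minimal client-side sketch of the pattern that produces such entries; the connection setup, retry count, and cell value are assumptions, while the table name, row key, and family/qualifier do appear in the log. The standard HBase client retries RegionTooBusyException internally, so the exception reaches caller code only after the retry budget is exhausted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushUnderLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: retry setting shown only for illustration; the test's actual value is not in this log.
    conf.setInt("hbase.client.retries.number", 10);

    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {

      // Writes like the Mutate calls in the log; while the region is over its memstore
      // limit these are retried by the client until the flush frees space or retries run out.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);

      // Triggers a FlushTableProcedure on the master, which fans out
      // FlushRegionProcedure subprocedures such as pid=143 above.
      admin.flush(tn);
    }
  }
}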
2024-11-27T13:25:40,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/405aad971a664660ba153fff03c3b8a2 2024-11-27T13:25:40,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e7be03aede6c4baf8335e7d52bcf87b7 is 50, key is test_row_0/C:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:40,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742318_1494 (size=12151) 2024-11-27T13:25:40,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:40,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T13:25:40,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:40,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:40,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T13:25:40,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:40,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T13:25:40,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:40,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714000398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714000398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:40,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714000398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:40,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:40,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714000398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:40,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714000401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:40,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e7be03aede6c4baf8335e7d52bcf87b7 2024-11-27T13:25:40,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7781852f00474a0293450dc3669478dd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd 2024-11-27T13:25:40,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd, entries=200, sequenceid=159, filesize=14.2 K 2024-11-27T13:25:40,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/405aad971a664660ba153fff03c3b8a2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2 2024-11-27T13:25:40,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2, entries=150, sequenceid=159, filesize=11.9 K 2024-11-27T13:25:40,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e7be03aede6c4baf8335e7d52bcf87b7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7 2024-11-27T13:25:40,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7, entries=150, sequenceid=159, filesize=11.9 K 2024-11-27T13:25:40,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for da754bda7623eef518328888f8b63cf4 in 1267ms, sequenceid=159, compaction requested=true 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:40,531 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:40,531 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:40,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:40,532 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:40,532 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:40,532 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:40,532 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:40,532 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:40,532 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,532 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/090cd310ac2a4d41b71987c06364efe8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=35.5 K 2024-11-27T13:25:40,532 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/3b8ccb3c9536414599740ef181abfb66, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=37.9 K 2024-11-27T13:25:40,533 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 090cd310ac2a4d41b71987c06364efe8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:40,533 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b8ccb3c9536414599740ef181abfb66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:40,533 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad6b948c0e56430ca11949098aa584e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732713936968 2024-11-27T13:25:40,533 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 917df64a16e24e619230198607c9a4d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732713936968 2024-11-27T13:25:40,533 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 405aad971a664660ba153fff03c3b8a2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 2024-11-27T13:25:40,534 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7781852f00474a0293450dc3669478dd, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 
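Both compaction threads log that the exploring algorithm "selected 3 files ... after considering 1 permutations with 1 in ratio". The ratio test behind that message accepts a candidate set only if no single file is larger than the combined size of the other files multiplied by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). The helper below is a simplified stand-alone sketch of that check, not the actual HBase implementation; the 36393-byte total matches the B-store selection logged above, but the per-file byte split is an assumption.

import java.util.List;

public class InRatioSketch {
  /** Simplified version of the "in ratio" test applied to a candidate set of store files. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // A single oversized file breaks the ratio and disqualifies the whole set.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three B-family files selected above (totalSize=36393 from the log;
    // the individual sizes here are illustrative assumptions).
    List<Long> sizes = List.of(12_288L, 11_980L, 12_125L);
    System.out.println(filesInRatio(sizes, 1.2)); // prints true: the set qualifies for a minor compaction
  }
}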
2024-11-27T13:25:40,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:40,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-27T13:25:40,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,535 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:40,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:40,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/32004ab467e748488271e67f66b7cef6 is 50, key is test_row_0/A:col10/1732713939271/Put/seqid=0 2024-11-27T13:25:40,547 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:40,547 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#419 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:40,547 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/b1510c9258e849f7a6f9f7d891552c80 is 50, key is test_row_0/B:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:40,547 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8ecdb2a86c874644bb6bc1189d510d53 is 50, key is test_row_0/A:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:40,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742319_1495 (size=9757) 2024-11-27T13:25:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742320_1496 (size=12493) 2024-11-27T13:25:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742321_1497 (size=12493) 2024-11-27T13:25:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T13:25:40,949 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/32004ab467e748488271e67f66b7cef6 2024-11-27T13:25:40,959 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/b1510c9258e849f7a6f9f7d891552c80 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b1510c9258e849f7a6f9f7d891552c80 2024-11-27T13:25:40,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2ed429ceb96a43dc9365fd44d931a3f8 is 50, key is test_row_0/B:col10/1732713939271/Put/seqid=0 2024-11-27T13:25:40,964 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into b1510c9258e849f7a6f9f7d891552c80(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:40,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:40,964 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713940531; duration=0sec 2024-11-27T13:25:40,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:40,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:40,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:40,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:40,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:40,965 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:40,965 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/074bbe8281c5440ea1b7031e3c7ebf78, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=35.5 K 2024-11-27T13:25:40,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 074bbe8281c5440ea1b7031e3c7ebf78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732713936644 2024-11-27T13:25:40,966 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 96bc3f1f5cbf4f3fa6c5fab7583f5f6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732713936968 2024-11-27T13:25:40,966 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e7be03aede6c4baf8335e7d52bcf87b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 2024-11-27T13:25:40,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742322_1498 (size=9757) 2024-11-27T13:25:40,967 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2ed429ceb96a43dc9365fd44d931a3f8 2024-11-27T13:25:40,977 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:40,978 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/216abe7698d548f5a9eba5ee03c57d36 is 50, key is test_row_0/C:col10/1732713939263/Put/seqid=0 2024-11-27T13:25:40,978 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/8ecdb2a86c874644bb6bc1189d510d53 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8ecdb2a86c874644bb6bc1189d510d53 2024-11-27T13:25:40,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fe761e1aa765429f914efd432f659ac6 is 50, key is test_row_0/C:col10/1732713939271/Put/seqid=0 2024-11-27T13:25:40,987 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 8ecdb2a86c874644bb6bc1189d510d53(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
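The PressureAwareThroughputController entry above reports the compaction's average throughput against a 50.00 MB/second limit and how many times the writer slept to stay under it. Below is a minimal sketch of that general throttling idea, sleeping whenever the observed byte rate would exceed a configured limit; SimpleThroughputLimiter is a hypothetical name and this is not HBase's actual controller:

// Simplified rate limiter: sleep long enough that bytesWritten / elapsed stays under the limit.
// Intended for single-threaded use by one writer, as a sketch only.
public class SimpleThroughputLimiter {
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  public SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  // Called after each chunk is written during a compaction or flush.
  public void control(long deltaBytes) throws InterruptedException {
    bytesWritten += deltaBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsForBytes = bytesWritten / maxBytesPerSecond;
    double sleepSec = minSecondsForBytes - elapsedSec;
    if (sleepSec > 0) {
      Thread.sleep((long) (sleepSec * 1000));
    }
  }
}

A compaction loop would call control(chunkSize) after each block it writes, so sustained throughput converges to the configured limit, which is what the "slept N time(s)" bookkeeping in the log line reflects.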
2024-11-27T13:25:40,987 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:40,987 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713940531; duration=0sec 2024-11-27T13:25:40,987 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:40,987 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:40,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742323_1499 (size=12493) 2024-11-27T13:25:40,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742324_1500 (size=9757) 2024-11-27T13:25:40,994 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/216abe7698d548f5a9eba5ee03c57d36 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/216abe7698d548f5a9eba5ee03c57d36 2024-11-27T13:25:41,001 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 216abe7698d548f5a9eba5ee03c57d36(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
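The selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible", "Exploring compaction algorithm has selected 3 files of size 36393 ... with 1 in ratio") pick a candidate set whose file sizes are mutually "in ratio". A common formulation of that test, shown here as a simplified sketch rather than the exact HBase implementation (CompactionRatioCheck is a hypothetical class name), is that no file may be larger than the combined size of the other candidates multiplied by the ratio:

import java.util.List;

public final class CompactionRatioCheck {
  // True if every file in the candidate set is <= (sum of the other files) * ratio.
  public static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three files of roughly 12 KB each, similar to the 3-file, ~36 KB selections in this log.
    System.out.println(filesInRatio(List.of(12_288L, 11_980L, 12_125L), 1.2)); // prints true
  }
}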
2024-11-27T13:25:41,001 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:41,001 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713940531; duration=0sec 2024-11-27T13:25:41,001 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:41,001 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:41,395 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fe761e1aa765429f914efd432f659ac6 2024-11-27T13:25:41,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/32004ab467e748488271e67f66b7cef6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6 2024-11-27T13:25:41,408 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6, entries=100, sequenceid=167, filesize=9.5 K 2024-11-27T13:25:41,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2ed429ceb96a43dc9365fd44d931a3f8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8 2024-11-27T13:25:41,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:41,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
as already flushing 2024-11-27T13:25:41,412 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8, entries=100, sequenceid=167, filesize=9.5 K 2024-11-27T13:25:41,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fe761e1aa765429f914efd432f659ac6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6 2024-11-27T13:25:41,417 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6, entries=100, sequenceid=167, filesize=9.5 K 2024-11-27T13:25:41,418 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=46.96 KB/48090 for da754bda7623eef518328888f8b63cf4 in 883ms, sequenceid=167, compaction requested=false 2024-11-27T13:25:41,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:41,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:41,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-27T13:25:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-27T13:25:41,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:41,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:41,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-27T13:25:41,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6480 sec 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:41,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:41,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.6530 sec 2024-11-27T13:25:41,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/adb00b5c7bcb4362838d2b7f8b2221bd is 50, key is test_row_0/A:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:41,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742325_1501 (size=14541) 2024-11-27T13:25:41,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/adb00b5c7bcb4362838d2b7f8b2221bd 2024-11-27T13:25:41,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714001447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/79501eb0b2134f48baacae7f06cce541 is 50, key is test_row_0/B:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714001448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714001449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714001449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714001450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742326_1502 (size=12151) 2024-11-27T13:25:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714001551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714001556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714001556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714001556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714001557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714001755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714001760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714001760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714001760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:41,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714001761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:41,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/79501eb0b2134f48baacae7f06cce541 2024-11-27T13:25:41,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dde94669f4ad4a399f56701b762166de is 50, key is test_row_0/C:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:41,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-27T13:25:41,874 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-27T13:25:41,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:41,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-27T13:25:41,877 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:41,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T13:25:41,878 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:41,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:41,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742327_1503 (size=12151) 
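The RegionTooBusyException warnings that dominate the rest of this window mean the region's memstore has reached its blocking size ("Over memstore limit=512.0 K", typically the configured flush size multiplied by hbase.hregion.memstore.block.multiplier), so new mutations are rejected until the in-flight flush drains it and callers are expected to back off and retry. The flushes themselves can also be requested explicitly, e.g. admin.flush(TableName.valueOf("TestAcidGuarantees")), which is what the HBaseAdmin / FlushTableProcedure entries above (procId 142, then 144) correspond to. A hedged sketch of client-side backoff around Table.put follows; BackoffPut is a hypothetical helper, and since the stock client already performs its own retries the server exception usually arrives wrapped, which is why the cause chain is inspected:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPut {
  // Retry a put with exponential backoff while the region reports it is too busy.
  // Illustrative only: the standard HBase client retries internally per its own settings.
  public static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (attempt >= maxAttempts || !causedByRegionTooBusy(e)) {
          throw e;
        }
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 10_000); // cap the backoff
      }
    }
  }

  private static boolean causedByRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}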
2024-11-27T13:25:41,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T13:25:42,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,030 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-27T13:25:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714002060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714002065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714002066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714002067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714002067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T13:25:42,182 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-27T13:25:42,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:42,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dde94669f4ad4a399f56701b762166de 2024-11-27T13:25:42,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/adb00b5c7bcb4362838d2b7f8b2221bd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd 2024-11-27T13:25:42,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd, entries=200, sequenceid=182, filesize=14.2 K 2024-11-27T13:25:42,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/79501eb0b2134f48baacae7f06cce541 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541 2024-11-27T13:25:42,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541, entries=150, 
sequenceid=182, filesize=11.9 K 2024-11-27T13:25:42,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dde94669f4ad4a399f56701b762166de as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de 2024-11-27T13:25:42,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de, entries=150, sequenceid=182, filesize=11.9 K 2024-11-27T13:25:42,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 880ms, sequenceid=182, compaction requested=true 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:42,301 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:42,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:42,301 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:42,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36791 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:42,302 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:42,302 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in 
TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,302 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8ecdb2a86c874644bb6bc1189d510d53, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=35.9 K 2024-11-27T13:25:42,302 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:42,302 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:42,302 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,302 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b1510c9258e849f7a6f9f7d891552c80, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=33.6 K 2024-11-27T13:25:42,303 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ecdb2a86c874644bb6bc1189d510d53, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 2024-11-27T13:25:42,303 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b1510c9258e849f7a6f9f7d891552c80, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 2024-11-27T13:25:42,303 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32004ab467e748488271e67f66b7cef6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732713939270 2024-11-27T13:25:42,303 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting adb00b5c7bcb4362838d2b7f8b2221bd, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:42,303 
DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ed429ceb96a43dc9365fd44d931a3f8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732713939270 2024-11-27T13:25:42,304 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 79501eb0b2134f48baacae7f06cce541, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:42,311 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:42,311 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f293604544c64905b8fa22e88d4aa360 is 50, key is test_row_0/A:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:42,313 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#427 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:42,314 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/cbf36e3def174aeab27608bc72716685 is 50, key is test_row_0/B:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:42,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742328_1504 (size=12595) 2024-11-27T13:25:42,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742329_1505 (size=12595) 2024-11-27T13:25:42,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:42,337 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:42,337 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/cbf36e3def174aeab27608bc72716685 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/cbf36e3def174aeab27608bc72716685 2024-11-27T13:25:42,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f3dec0f5ae85417caf90f2931de70740 is 50, key is test_row_0/A:col10/1732713941448/Put/seqid=0 2024-11-27T13:25:42,342 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into cbf36e3def174aeab27608bc72716685(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:42,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:42,342 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713942301; duration=0sec 2024-11-27T13:25:42,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:42,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:42,342 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:42,343 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:42,343 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:42,343 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,343 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/216abe7698d548f5a9eba5ee03c57d36, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=33.6 K 2024-11-27T13:25:42,344 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 216abe7698d548f5a9eba5ee03c57d36, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732713938132 2024-11-27T13:25:42,344 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fe761e1aa765429f914efd432f659ac6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732713939270 2024-11-27T13:25:42,344 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting dde94669f4ad4a399f56701b762166de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:42,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is 
added to blk_1073742330_1506 (size=12151) 2024-11-27T13:25:42,348 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f3dec0f5ae85417caf90f2931de70740 2024-11-27T13:25:42,354 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:42,354 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/18719e4d3f434e5c8a322acefd98ba73 is 50, key is test_row_0/C:col10/1732713941415/Put/seqid=0 2024-11-27T13:25:42,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/c782c7713fcd4bc0933c19574f6bdc5a is 50, key is test_row_0/B:col10/1732713941448/Put/seqid=0 2024-11-27T13:25:42,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742331_1507 (size=12595) 2024-11-27T13:25:42,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742332_1508 (size=12151) 2024-11-27T13:25:42,372 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/c782c7713fcd4bc0933c19574f6bdc5a 2024-11-27T13:25:42,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/8facae7e46e14f4f978fa8ea7e3c38e8 is 50, key is test_row_0/C:col10/1732713941448/Put/seqid=0 2024-11-27T13:25:42,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742333_1509 (size=12151) 2024-11-27T13:25:42,394 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/8facae7e46e14f4f978fa8ea7e3c38e8 2024-11-27T13:25:42,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f3dec0f5ae85417caf90f2931de70740 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740 2024-11-27T13:25:42,403 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740, entries=150, sequenceid=206, filesize=11.9 K 2024-11-27T13:25:42,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/c782c7713fcd4bc0933c19574f6bdc5a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a 2024-11-27T13:25:42,409 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a, entries=150, sequenceid=206, filesize=11.9 K 2024-11-27T13:25:42,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/8facae7e46e14f4f978fa8ea7e3c38e8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8 2024-11-27T13:25:42,412 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8, entries=150, sequenceid=206, filesize=11.9 K 2024-11-27T13:25:42,413 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for da754bda7623eef518328888f8b63cf4 in 76ms, sequenceid=206, compaction requested=false 2024-11-27T13:25:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-27T13:25:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-27T13:25:42,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-27T13:25:42,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 536 msec 2024-11-27T13:25:42,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 541 msec 2024-11-27T13:25:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-27T13:25:42,480 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-27T13:25:42,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-27T13:25:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:42,483 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:42,484 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:42,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:42,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:42,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:42,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:42,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/a4752ca02ee84c4e9ce7be14df60cdbb is 50, key is test_row_0/A:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742334_1510 (size=16931) 2024-11-27T13:25:42,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/a4752ca02ee84c4e9ce7be14df60cdbb 2024-11-27T13:25:42,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f5e4440499e1484299ac4dc3e6e45563 is 50, key is test_row_0/B:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:42,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742335_1511 (size=12151) 2024-11-27T13:25:42,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714002618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714002619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714002620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714002621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714002627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,636 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,730 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f293604544c64905b8fa22e88d4aa360 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f293604544c64905b8fa22e88d4aa360 2024-11-27T13:25:42,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714002729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714002729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,735 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into f293604544c64905b8fa22e88d4aa360(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:42,735 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:42,735 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713942301; duration=0sec 2024-11-27T13:25:42,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,735 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:42,735 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:42,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714002729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714002729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714002734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,770 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/18719e4d3f434e5c8a322acefd98ba73 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/18719e4d3f434e5c8a322acefd98ba73 2024-11-27T13:25:42,774 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 18719e4d3f434e5c8a322acefd98ba73(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:42,774 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:42,774 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713942301; duration=0sec 2024-11-27T13:25:42,774 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:42,774 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:42,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714002936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714002937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,941 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:42,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714002937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714002937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:42,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:42,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:42,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714002940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:42,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:42,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:42,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:42,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:43,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f5e4440499e1484299ac4dc3e6e45563 2024-11-27T13:25:43,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3f029e4313be4147aaa6c3449728b7d1 is 50, key is test_row_0/C:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:43,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742336_1512 (size=12151) 2024-11-27T13:25:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:43,094 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:43,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:43,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:43,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714003241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714003242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714003242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714003242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714003243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:43,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,399 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:43,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:43,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3f029e4313be4147aaa6c3449728b7d1 2024-11-27T13:25:43,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/a4752ca02ee84c4e9ce7be14df60cdbb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb 2024-11-27T13:25:43,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb, entries=250, sequenceid=219, filesize=16.5 K 2024-11-27T13:25:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-27T13:25:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/f5e4440499e1484299ac4dc3e6e45563 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563 2024-11-27T13:25:43,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563, entries=150, sequenceid=219, filesize=11.9 K 2024-11-27T13:25:43,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3f029e4313be4147aaa6c3449728b7d1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1 2024-11-27T13:25:43,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1, entries=150, sequenceid=219, filesize=11.9 K 2024-11-27T13:25:43,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 861ms, sequenceid=219, compaction requested=true 2024-11-27T13:25:43,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 
2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:43,448 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:43,448 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:43,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:43,449 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,449 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
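
The SortedCompactionPolicy and ExploringCompactionPolicy lines above are the selection step: three store files per store are eligible, all three are chosen, and a minor compaction of each store begins; the "16 blocking" figure is the store-file count at which writes would start to block. These thresholds are ordinary HBase settings, and a hedged sketch of setting them programmatically (key names are stock, values illustrative and not read from this test's configuration) follows:

    // Hedged sketch: the standard store-file thresholds behind the selection
    // logged above. Values are illustrative only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobs {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // considered (the selection above fires once 3 files are eligible).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files compacted in a single pass.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes block writers; this is the
        // "16 blocking" figure printed by SortedCompactionPolicy.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }
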
2024-11-27T13:25:43,449 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/cbf36e3def174aeab27608bc72716685, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.0 K 2024-11-27T13:25:43,449 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f293604544c64905b8fa22e88d4aa360, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=40.7 K 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cbf36e3def174aeab27608bc72716685, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:43,449 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f293604544c64905b8fa22e88d4aa360, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:43,450 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3dec0f5ae85417caf90f2931de70740, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732713941448 2024-11-27T13:25:43,450 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c782c7713fcd4bc0933c19574f6bdc5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732713941448 2024-11-27T13:25:43,450 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f5e4440499e1484299ac4dc3e6e45563, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942578 2024-11-27T13:25:43,450 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4752ca02ee84c4e9ce7be14df60cdbb, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942570 2024-11-27T13:25:43,459 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:43,460 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/43efcd83a6a44356b8c79046fdde5b14 is 50, key is test_row_0/B:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:43,463 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#436 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:43,463 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/a797fd040a5f426d8571bb419c045020 is 50, key is test_row_0/A:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:43,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742337_1513 (size=12697) 2024-11-27T13:25:43,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742338_1514 (size=12697) 2024-11-27T13:25:43,514 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/a797fd040a5f426d8571bb419c045020 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a797fd040a5f426d8571bb419c045020 2024-11-27T13:25:43,520 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into a797fd040a5f426d8571bb419c045020(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
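
The PressureAwareThroughputController entries above report that each compaction ran without sleeping against an aggregate limit of 50.00 MB/second. That limit is imposed by the compaction throughput controller; a hedged sketch of the usual bounds (key names as documented for HBase 2.x and worth verifying against the exact version in use, values illustrative) is:

    // Hedged sketch: the throughput-controller bounds behind the
    // "total limit is 50.00 MB/second" figure above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputKnobs {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // The pressure-aware controller scales the allowed compaction write
        // rate between these two bounds (bytes per second) as flush pressure
        // rises and falls.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        return conf;
      }
    }
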
2024-11-27T13:25:43,520 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,520 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713943448; duration=0sec 2024-11-27T13:25:43,520 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:43,520 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:43,520 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:43,521 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:43,521 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:43,521 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,522 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/18719e4d3f434e5c8a322acefd98ba73, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.0 K 2024-11-27T13:25:43,522 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18719e4d3f434e5c8a322acefd98ba73, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732713941412 2024-11-27T13:25:43,522 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8facae7e46e14f4f978fa8ea7e3c38e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732713941448 2024-11-27T13:25:43,523 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f029e4313be4147aaa6c3449728b7d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942578 2024-11-27T13:25:43,532 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:43,533 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/2b47830804ec4399bf90f093a13e2c7b is 50, key is test_row_0/C:col10/1732713942578/Put/seqid=0 2024-11-27T13:25:43,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:43,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-27T13:25:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,553 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:25:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:43,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:43,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/e9820b62f3d240eda3aac217909effdb is 50, key is test_row_0/A:col10/1732713942620/Put/seqid=0 2024-11-27T13:25:43,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742339_1515 (size=12697) 2024-11-27T13:25:43,580 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/2b47830804ec4399bf90f093a13e2c7b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/2b47830804ec4399bf90f093a13e2c7b 2024-11-27T13:25:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:43,589 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 2b47830804ec4399bf90f093a13e2c7b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:43,589 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,589 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713943448; duration=0sec 2024-11-27T13:25:43,589 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:43,589 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:43,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742340_1516 (size=12151) 2024-11-27T13:25:43,598 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/e9820b62f3d240eda3aac217909effdb 2024-11-27T13:25:43,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/73ea7c9b65e44052b2b8748f0790afdb is 50, key is test_row_0/B:col10/1732713942620/Put/seqid=0 2024-11-27T13:25:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742341_1517 (size=12151) 2024-11-27T13:25:43,622 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/73ea7c9b65e44052b2b8748f0790afdb 2024-11-27T13:25:43,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/aa777b02cf704b10ba522a81ee487167 is 50, key is test_row_0/C:col10/1732713942620/Put/seqid=0 2024-11-27T13:25:43,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742342_1518 (size=12151) 2024-11-27T13:25:43,643 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/aa777b02cf704b10ba522a81ee487167 2024-11-27T13:25:43,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/e9820b62f3d240eda3aac217909effdb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb 2024-11-27T13:25:43,651 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb, entries=150, sequenceid=246, filesize=11.9 K 2024-11-27T13:25:43,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/73ea7c9b65e44052b2b8748f0790afdb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb 2024-11-27T13:25:43,657 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb, entries=150, sequenceid=246, filesize=11.9 K 2024-11-27T13:25:43,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/aa777b02cf704b10ba522a81ee487167 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167 2024-11-27T13:25:43,664 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167, entries=150, sequenceid=246, filesize=11.9 K 2024-11-27T13:25:43,665 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for da754bda7623eef518328888f8b63cf4 in 112ms, sequenceid=246, compaction requested=false 2024-11-27T13:25:43,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:43,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-27T13:25:43,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-27T13:25:43,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-27T13:25:43,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1840 sec 2024-11-27T13:25:43,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.1910 sec 2024-11-27T13:25:43,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:43,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:43,802 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/eacfbc813dc14131bc32a7af9f266c65 is 50, key is test_row_0/A:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:43,812 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742343_1519 (size=12251) 2024-11-27T13:25:43,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/eacfbc813dc14131bc32a7af9f266c65 2024-11-27T13:25:43,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/286e5a8fb58c404188d32a20eda9e71c is 50, key is test_row_0/B:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:43,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742344_1520 (size=12251) 2024-11-27T13:25:43,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/286e5a8fb58c404188d32a20eda9e71c 2024-11-27T13:25:43,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714003829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714003831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/d974c4d5c5fe4837971487b972686e98 is 50, key is test_row_0/C:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:43,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742345_1521 (size=12251) 2024-11-27T13:25:43,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/d974c4d5c5fe4837971487b972686e98 2024-11-27T13:25:43,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714003834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714003835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714003836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/eacfbc813dc14131bc32a7af9f266c65 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65 2024-11-27T13:25:43,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65, entries=150, sequenceid=259, filesize=12.0 K 2024-11-27T13:25:43,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/286e5a8fb58c404188d32a20eda9e71c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c 2024-11-27T13:25:43,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c, entries=150, sequenceid=259, filesize=12.0 K 2024-11-27T13:25:43,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/d974c4d5c5fe4837971487b972686e98 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98 2024-11-27T13:25:43,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98, entries=150, sequenceid=259, filesize=12.0 K 2024-11-27T13:25:43,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 79ms, sequenceid=259, compaction requested=true 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:43,879 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:25:43,880 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:43,880 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:43,880 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
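
The RegionTooBusyException warnings interleaved above are the region server pushing back on writers while the memstore sits over its blocking limit; callers are expected to back off and retry until the in-flight flush frees space. The stock HBase client already retries this internally (and may surface the failure wrapped in a retries-exhausted exception), so the sketch below only makes the backoff explicit; the helper class and its names are illustrative, not taken from the test code:

    // Hedged sketch: an explicit backoff-and-retry around Table.put for the
    // RegionTooBusyException pushback above. Assumes the server exception
    // surfaces directly rather than wrapped by client-side retries.
    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
      static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long sleepMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException busy) {
              // Memstore is over its blocking limit; give the flush a moment.
              Thread.sleep(sleepMs);
              sleepMs *= 2;
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }

      public static void main(String[] args) {
        // Shape of the writes in this test: row "test_row_0", families A/B/C.
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        // putWithBackoff(connection, put) would be invoked with a live Connection.
      }
    }
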
2024-11-27T13:25:43,881 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a797fd040a5f426d8571bb419c045020, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.2 K 2024-11-27T13:25:43,881 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a797fd040a5f426d8571bb419c045020, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942578 2024-11-27T13:25:43,882 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9820b62f3d240eda3aac217909effdb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732713942617 2024-11-27T13:25:43,883 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting eacfbc813dc14131bc32a7af9f266c65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:43,887 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/43efcd83a6a44356b8c79046fdde5b14 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/43efcd83a6a44356b8c79046fdde5b14 2024-11-27T13:25:43,894 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 43efcd83a6a44356b8c79046fdde5b14(size=12.4 K), total size for store is 36.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
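
The "Over memstore limit=512.0 K" figure in those warnings is the blocking threshold for this test-scaled region: the per-region flush size multiplied by the block multiplier. A hedged sketch of the two stock settings involved (values chosen only to mirror that arithmetic, 128 K times 4 equals 512 K, and not read from the test configuration; the production default flush size is 128 MB with the same multiplier of 4) is:

    // Hedged sketch: the two settings whose product is the blocking threshold
    // reported as "Over memstore limit=512.0 K" above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingKnobs {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writers receive RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }
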
2024-11-27T13:25:43,894 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:43,894 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713943448; duration=0sec 2024-11-27T13:25:43,894 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-27T13:25:43,894 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:43,894 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:43,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:43,895 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:43,895 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.
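
With each flush adding one file per store and each compaction folding three back into one, store-file counts oscillate throughout this run. A hedged sketch of polling those counts from a client (method names follow the HBase 2.x Admin and RegionMetrics API and should be verified against the version actually in use) is:

    // Hedged sketch: watch per-region store and store-file counts for the
    // test table as flushes and compactions like the ones above proceed.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class StoreFileWatch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            for (RegionMetrics rm : admin.getRegionMetrics(sn, table)) {
              // One line per region: name, store count, store-file count, memstore size.
              System.out.printf("%s stores=%d storefiles=%d memstore=%s%n",
                  rm.getNameAsString(), rm.getStoreCount(), rm.getStoreFileCount(),
                  rm.getMemStoreSize());
            }
          }
        }
      }
    }
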
2024-11-27T13:25:43,895 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/2b47830804ec4399bf90f093a13e2c7b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.2 K 2024-11-27T13:25:43,896 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b47830804ec4399bf90f093a13e2c7b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942578 2024-11-27T13:25:43,897 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting aa777b02cf704b10ba522a81ee487167, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732713942617 2024-11-27T13:25:43,897 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d974c4d5c5fe4837971487b972686e98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:43,910 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:43,910 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/df0bc83b45de48259e5d67c4d8d09714 is 50, key is test_row_0/A:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:43,926 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#445 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:43,927 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/498e5210125e4cd2b10d7d09f3e1e793 is 50, key is test_row_0/C:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:43,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:43,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:43,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/57f451d49b864fe4ae76caba92d61c88 is 50, key is test_row_0/A:col10/1732713943940/Put/seqid=0 2024-11-27T13:25:43,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714003958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742346_1522 (size=12899) 2024-11-27T13:25:43,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714003959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:43,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742347_1523 (size=12899) 2024-11-27T13:25:43,998 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/df0bc83b45de48259e5d67c4d8d09714 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/df0bc83b45de48259e5d67c4d8d09714 2024-11-27T13:25:43,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714003985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714003985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714003991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,004 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into df0bc83b45de48259e5d67c4d8d09714(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:44,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:44,004 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713943879; duration=0sec 2024-11-27T13:25:44,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:44,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:44,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:44,005 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:44,005 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:44,005 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
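The compaction entries above ("Selecting compaction from 3 store files ... Exploring compaction algorithm has selected 3 files of size 37099 ...") show the store choosing a contiguous run of similarly sized HFiles for a minor compaction. The following is only a rough, self-contained sketch of the size-ratio idea behind that kind of selection: pick the longest run in which no file dwarfs the rest of the run. The RatioCompactionSketch class, the 1.2 ratio, and the example sizes are invented for illustration; this is not the actual ExploringCompactionPolicy code.

import java.util.ArrayList;
import java.util.List;

// Illustrative ratio-based compaction selection (hypothetical, not HBase's
// ExploringCompactionPolicy): keep the longest contiguous run of store files
// in which every file is at most `ratio` times the size of the other files
// in the run combined.
public class RatioCompactionSketch {

    static List<Long> selectFiles(List<Long> fileSizes, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + 1; end <= fileSizes.size(); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                if (candidate.size() >= 2 && withinRatio(candidate, ratio)
                        && candidate.size() > best.size()) {
                    best = new ArrayList<>(candidate);
                }
            }
        }
        return best;
    }

    // Every file must be <= ratio * (sum of the other files in the run).
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three files of comparable size (roughly like the ~12 K HFiles above)
        // form the selected run; the much larger file is left out.
        System.out.println(selectFiles(List.of(12_700L, 12_200L, 12_200L, 200_000L), 1.2));
    }
}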
2024-11-27T13:25:44,005 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/43efcd83a6a44356b8c79046fdde5b14, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.2 K 2024-11-27T13:25:44,006 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43efcd83a6a44356b8c79046fdde5b14, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732713942578 2024-11-27T13:25:44,007 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73ea7c9b65e44052b2b8748f0790afdb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732713942617 2024-11-27T13:25:44,007 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 286e5a8fb58c404188d32a20eda9e71c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:44,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742348_1524 (size=14741) 2024-11-27T13:25:44,015 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/57f451d49b864fe4ae76caba92d61c88 2024-11-27T13:25:44,024 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:44,024 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/ed2b9ac2ad724571bee16ebd72c655f2 is 50, key is test_row_0/B:col10/1732713943801/Put/seqid=0 2024-11-27T13:25:44,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/0193dcf8935241b883ff8e4c238800ad is 50, key is test_row_0/B:col10/1732713943940/Put/seqid=0 2024-11-27T13:25:44,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742349_1525 (size=12899) 2024-11-27T13:25:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742350_1526 (size=12301) 2024-11-27T13:25:44,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714004086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714004091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714004101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714004102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714004102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714004292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714004295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714004308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714004309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714004309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,398 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/498e5210125e4cd2b10d7d09f3e1e793 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/498e5210125e4cd2b10d7d09f3e1e793 2024-11-27T13:25:44,402 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 498e5210125e4cd2b10d7d09f3e1e793(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:44,402 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:44,402 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713943879; duration=0sec 2024-11-27T13:25:44,402 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:44,402 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:44,458 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/ed2b9ac2ad724571bee16ebd72c655f2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed2b9ac2ad724571bee16ebd72c655f2 2024-11-27T13:25:44,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/0193dcf8935241b883ff8e4c238800ad 2024-11-27T13:25:44,463 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into ed2b9ac2ad724571bee16ebd72c655f2(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
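The "Committing .../.tmp/... as .../B/..." and "Committing .../.tmp/... as .../C/..." entries above show flush and compaction output being written under a temporary directory first and only then moved into the column-family directory. Below is a minimal sketch of that write-to-tmp-then-rename pattern using the Hadoop FileSystem API; the TmpCommitSketch class, the flushToStore helper, and the paths are hypothetical stand-ins, not the HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-tmp-then-rename commit pattern: the new file only
// becomes visible under the store directory once it is complete, so readers
// never observe a partially written file. All names are illustrative.
public class TmpCommitSketch {

    static Path flushToStore(FileSystem fs, Path regionDir, String family,
                             String fileName, byte[] payload) throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        Path storeFile = new Path(new Path(regionDir, family), fileName);

        fs.mkdirs(tmpFile.getParent());
        // Write the whole file under .tmp first (stand-in for writing an HFile).
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload);
        }

        // Commit: move the finished file into the column-family directory.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
        return storeFile;
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();   // fs.defaultFS decides local FS vs. HDFS
        FileSystem fs = FileSystem.get(conf);
        Path committed = flushToStore(fs, new Path("/tmp/demo-region"), "A",
                "example-file", "demo".getBytes("UTF-8"));
        System.out.println("Committed to " + committed);
    }
}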
2024-11-27T13:25:44,463 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:44,463 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713943879; duration=0sec 2024-11-27T13:25:44,463 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:44,463 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:44,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4ff1aac65d1a4860ad70101a660d8cd6 is 50, key is test_row_0/C:col10/1732713943940/Put/seqid=0 2024-11-27T13:25:44,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742351_1527 (size=12301) 2024-11-27T13:25:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-27T13:25:44,586 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-27T13:25:44,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:44,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-27T13:25:44,589 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:44,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T13:25:44,590 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:44,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:44,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714004598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714004603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714004612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714004613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:44,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714004613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:44,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T13:25:44,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:44,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-27T13:25:44,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:44,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:44,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:44,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
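The repeated RegionTooBusyException entries in this stretch show puts being rejected while the region's memstore is over its blocking limit, and the procedure-driven flush above is refused because the region is already flushing. The standard HBase client normally retries such retriable failures on its own; the sketch below only illustrates an explicit backoff loop for a caller that sees the exception surface. BusyRegionRetrySketch, the table/row/column values, and the backoff constants are assumptions made for the example, not something taken from this log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative retry loop for a put that may be rejected with
// RegionTooBusyException while the region is over its memstore limit.
public class BusyRegionRetrySketch {

    static void putWithBackoff(Table table, Put put, int maxAttempts)
            throws IOException, InterruptedException {
        long backoffMs = 100;                       // initial pause, illustrative
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;                             // accepted by the region server
            } catch (RegionTooBusyException busy) {
                if (attempt >= maxAttempts) {
                    throw busy;                     // give up, surface the failure
                }
                Thread.sleep(backoffMs);            // let flush/compaction catch up
                backoffMs = Math.min(backoffMs * 2, 5_000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 10);
        }
    }
}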
2024-11-27T13:25:44,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:44,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:44,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4ff1aac65d1a4860ad70101a660d8cd6 2024-11-27T13:25:44,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/57f451d49b864fe4ae76caba92d61c88 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88 2024-11-27T13:25:44,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88, entries=200, sequenceid=285, filesize=14.4 K 2024-11-27T13:25:44,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/0193dcf8935241b883ff8e4c238800ad as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad 2024-11-27T13:25:44,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad, entries=150, sequenceid=285, filesize=12.0 K 2024-11-27T13:25:44,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4ff1aac65d1a4860ad70101a660d8cd6 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6 2024-11-27T13:25:44,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6, entries=150, sequenceid=285, filesize=12.0 K 2024-11-27T13:25:44,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for da754bda7623eef518328888f8b63cf4 in 945ms, sequenceid=285, compaction requested=false 2024-11-27T13:25:44,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:44,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=148 2024-11-27T13:25:44,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:44,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-27T13:25:44,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:44,895 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:44,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/18e2751929be4d9daa5ac75a57b8d255 is 50, key is test_row_0/A:col10/1732713943971/Put/seqid=0 2024-11-27T13:25:44,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742352_1528 (size=12301) 2024-11-27T13:25:45,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:45,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:45,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714005136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714005137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714005138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714005139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714005140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T13:25:45,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714005243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714005244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714005244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714005244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714005245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,304 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/18e2751929be4d9daa5ac75a57b8d255 2024-11-27T13:25:45,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7754394acfab48abaf051fc0d67880c5 is 50, key is test_row_0/B:col10/1732713943971/Put/seqid=0 2024-11-27T13:25:45,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742353_1529 (size=12301) 2024-11-27T13:25:45,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714005447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714005447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714005448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714005448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714005451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T13:25:45,728 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7754394acfab48abaf051fc0d67880c5 2024-11-27T13:25:45,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/675ec0932ebf442a99de8bc952246ab3 is 50, key is test_row_0/C:col10/1732713943971/Put/seqid=0 2024-11-27T13:25:45,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742354_1530 (size=12301) 2024-11-27T13:25:45,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714005753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714005754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714005754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714005755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:45,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714005757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,140 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/675ec0932ebf442a99de8bc952246ab3 2024-11-27T13:25:46,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/18e2751929be4d9daa5ac75a57b8d255 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255 2024-11-27T13:25:46,147 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255, entries=150, sequenceid=300, filesize=12.0 K 2024-11-27T13:25:46,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7754394acfab48abaf051fc0d67880c5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5 2024-11-27T13:25:46,152 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5, entries=150, sequenceid=300, filesize=12.0 K 2024-11-27T13:25:46,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/675ec0932ebf442a99de8bc952246ab3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3 2024-11-27T13:25:46,159 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3, entries=150, sequenceid=300, filesize=12.0 K 2024-11-27T13:25:46,160 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for da754bda7623eef518328888f8b63cf4 in 1265ms, sequenceid=300, compaction requested=true 2024-11-27T13:25:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-27T13:25:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-27T13:25:46,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-27T13:25:46,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5720 sec 2024-11-27T13:25:46,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.5750 sec 2024-11-27T13:25:46,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:46,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:46,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:46,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7cd1cafb6aaf4847a97638109e764285 is 50, key is test_row_0/A:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:46,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742355_1531 (size=14741) 2024-11-27T13:25:46,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7cd1cafb6aaf4847a97638109e764285 2024-11-27T13:25:46,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714006271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714006274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2a4384a2ac6048428f8171d64d3306a8 is 50, key is test_row_0/B:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714006275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742356_1532 (size=12301) 2024-11-27T13:25:46,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714006280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714006280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714006382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714006382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714006383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714006390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714006390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714006586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714006587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714006587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714006594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714006594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2a4384a2ac6048428f8171d64d3306a8 2024-11-27T13:25:46,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-27T13:25:46,693 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-27T13:25:46,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e4e9aae6927d4518b3ff10caf53bc069 is 50, key is test_row_0/C:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:46,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:46,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-11-27T13:25:46,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:46,696 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:46,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:46,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742357_1533 (size=12301) 
2024-11-27T13:25:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:46,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:46,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-27T13:25:46,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:46,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:46,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:46,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:46,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:46,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714006890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714006892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714006892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714006898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:46,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714006899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:46,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:47,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:47,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-27T13:25:47,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:47,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:47,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:47,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:47,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:47,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e4e9aae6927d4518b3ff10caf53bc069 2024-11-27T13:25:47,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7cd1cafb6aaf4847a97638109e764285 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285 2024-11-27T13:25:47,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285, entries=200, sequenceid=325, filesize=14.4 K 2024-11-27T13:25:47,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2a4384a2ac6048428f8171d64d3306a8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8 2024-11-27T13:25:47,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8, entries=150, 
sequenceid=325, filesize=12.0 K 2024-11-27T13:25:47,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/e4e9aae6927d4518b3ff10caf53bc069 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069 2024-11-27T13:25:47,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069, entries=150, sequenceid=325, filesize=12.0 K 2024-11-27T13:25:47,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for da754bda7623eef518328888f8b63cf4 in 860ms, sequenceid=325, compaction requested=true 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:47,126 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:47,126 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:47,129 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:47,129 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:47,129 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in 
TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:47,129 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed2b9ac2ad724571bee16ebd72c655f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=48.6 K 2024-11-27T13:25:47,129 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54682 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:47,129 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:47,129 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:47,129 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/df0bc83b45de48259e5d67c4d8d09714, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=53.4 K 2024-11-27T13:25:47,130 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting df0bc83b45de48259e5d67c4d8d09714, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:47,130 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting ed2b9ac2ad724571bee16ebd72c655f2, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:47,130 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0193dcf8935241b883ff8e4c238800ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732713943832 2024-11-27T13:25:47,130 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57f451d49b864fe4ae76caba92d61c88, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732713943832 2024-11-27T13:25:47,131 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7754394acfab48abaf051fc0d67880c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732713943964 2024-11-27T13:25:47,131 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18e2751929be4d9daa5ac75a57b8d255, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732713943964 2024-11-27T13:25:47,131 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cd1cafb6aaf4847a97638109e764285, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:47,131 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a4384a2ac6048428f8171d64d3306a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:47,139 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:47,140 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2fac48e9991c4ace8f4e1a806679f858 is 50, key is test_row_0/B:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:47,142 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#457 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:47,143 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/73d6a1619b5d420d9f960e05a264b9e5 is 50, key is test_row_0/A:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:47,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742359_1535 (size=13085) 2024-11-27T13:25:47,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742358_1534 (size=13085) 2024-11-27T13:25:47,154 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:47,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-27T13:25:47,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:47,154 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:47,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:47,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/aa71dd985b15427c88d66e31540b3a73 is 50, key is test_row_0/A:col10/1732713946278/Put/seqid=0 2024-11-27T13:25:47,166 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/2fac48e9991c4ace8f4e1a806679f858 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2fac48e9991c4ace8f4e1a806679f858 2024-11-27T13:25:47,172 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 2fac48e9991c4ace8f4e1a806679f858(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
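[Editor's note, not part of the captured log] The PressureAwareThroughputController entries above report per-compaction throughput (6.55 MB/second) against a 50.00 MB/second total limit. As a minimal, illustrative Java sketch only: the bounds that feed that limit are normally supplied through the HBase Configuration. The property names below are the standard compaction-throttle keys; the numeric values are placeholders and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: tune the pressure-aware compaction throttle bounds.
public class CompactionThrottleSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Upper/lower bounds (bytes/sec) between which the controller adjusts the
    // allowed compaction throughput based on store pressure; placeholder values.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}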
2024-11-27T13:25:47,172 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:47,172 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=12, startTime=1732713947126; duration=0sec 2024-11-27T13:25:47,172 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:47,172 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:47,172 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:47,174 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:47,174 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:47,174 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:47,174 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/498e5210125e4cd2b10d7d09f3e1e793, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=48.6 K 2024-11-27T13:25:47,174 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 498e5210125e4cd2b10d7d09f3e1e793, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732713943797 2024-11-27T13:25:47,174 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ff1aac65d1a4860ad70101a660d8cd6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732713943832 2024-11-27T13:25:47,175 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 675ec0932ebf442a99de8bc952246ab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=300, earliestPutTs=1732713943964 2024-11-27T13:25:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742360_1536 (size=12301) 2024-11-27T13:25:47,175 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting e4e9aae6927d4518b3ff10caf53bc069, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:47,176 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/aa71dd985b15427c88d66e31540b3a73 2024-11-27T13:25:47,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d19e137cbf84477396410d788d18f99c is 50, key is test_row_0/B:col10/1732713946278/Put/seqid=0 2024-11-27T13:25:47,190 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:47,191 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/168daedd83b341b6993aa7e17f8c7028 is 50, key is test_row_0/C:col10/1732713945138/Put/seqid=0 2024-11-27T13:25:47,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742361_1537 (size=12301) 2024-11-27T13:25:47,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742362_1538 (size=13085) 2024-11-27T13:25:47,231 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/168daedd83b341b6993aa7e17f8c7028 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/168daedd83b341b6993aa7e17f8c7028 2024-11-27T13:25:47,235 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 168daedd83b341b6993aa7e17f8c7028(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:47,235 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:47,235 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=12, startTime=1732713947126; duration=0sec 2024-11-27T13:25:47,236 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:47,236 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:47,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:47,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:47,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714007427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714007432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714007433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714007434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714007435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714007537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714007537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714007541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714007541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714007542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,557 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/73d6a1619b5d420d9f960e05a264b9e5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/73d6a1619b5d420d9f960e05a264b9e5 2024-11-27T13:25:47,561 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 73d6a1619b5d420d9f960e05a264b9e5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
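[Editor's note, not part of the captured log] The repeated RegionTooBusyException records above show the region rejecting mutations while its memstore is over the 512.0 K blocking limit. The HBase client normally retries this exception internally; the sketch below is only a hedged illustration of explicit application-level backoff in case it does surface. The table name, column, and retry policy are assumptions for illustration, not details confirmed by the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: back off and retry a put rejected while the region's
// memstore is over its blocking limit.
public class BusyRegionBackoffSketch {
  public static void putWithBackoff(byte[] row, byte[] value)
      throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(row).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Region is shedding load until flushes/compactions drain the memstore.
          if (attempt >= 5) throw e;
          Thread.sleep(200L * attempt); // simple linear backoff; policy is an assumption
        }
      }
    }
  }
}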
2024-11-27T13:25:47,561 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:47,561 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=12, startTime=1732713947126; duration=0sec 2024-11-27T13:25:47,561 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:47,561 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:47,614 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d19e137cbf84477396410d788d18f99c 2024-11-27T13:25:47,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dae3d98de2e24de29047bfef09f039f0 is 50, key is test_row_0/C:col10/1732713946278/Put/seqid=0 2024-11-27T13:25:47,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742363_1539 (size=12301) 2024-11-27T13:25:47,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714007741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714007742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714007748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714007748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:47,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714007748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:47,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:48,035 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dae3d98de2e24de29047bfef09f039f0 2024-11-27T13:25:48,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/aa71dd985b15427c88d66e31540b3a73 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73 2024-11-27T13:25:48,043 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73, entries=150, sequenceid=336, filesize=12.0 K 2024-11-27T13:25:48,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d19e137cbf84477396410d788d18f99c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c 2024-11-27T13:25:48,047 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c, entries=150, sequenceid=336, filesize=12.0 K 2024-11-27T13:25:48,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/dae3d98de2e24de29047bfef09f039f0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0 2024-11-27T13:25:48,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714008047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714008048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,051 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0, entries=150, sequenceid=336, filesize=12.0 K 2024-11-27T13:25:48,052 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for da754bda7623eef518328888f8b63cf4 in 898ms, sequenceid=336, compaction requested=false 2024-11-27T13:25:48,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:48,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
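[Editor's note, not part of the captured log] The 512.0 K blocking limit in the exceptions above is the per-region memstore flush size multiplied by the block multiplier, so this test presumably runs with a deliberately small flush size. A hedged Java sketch of the relevant settings follows; the values are inferred for illustration (e.g. 128 K x 4 = 512 K) and are not confirmed by the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the region blocks writes once memstore usage exceeds
// flush.size * block.multiplier.
public class MemstoreLimitSketch {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // assumed test value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    return conf;
  }
}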
2024-11-27T13:25:48,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-27T13:25:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-27T13:25:48,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-27T13:25:48,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3580 sec 2024-11-27T13:25:48,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.3610 sec 2024-11-27T13:25:48,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:48,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:48,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7903442b30cc4536aae9d299d0d8bbfb is 50, key is test_row_0/A:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714008065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714008068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742364_1540 (size=14741) 2024-11-27T13:25:48,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714008069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714008172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714008172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714008177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714008375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714008377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714008380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7903442b30cc4536aae9d299d0d8bbfb 2024-11-27T13:25:48,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/11ed1181d99e4ba9a0f7b7705c7c3913 is 50, key is test_row_0/B:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742365_1541 (size=12301) 2024-11-27T13:25:48,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714008555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714008555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714008681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714008682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714008683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:48,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-27T13:25:48,800 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-27T13:25:48,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:48,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-11-27T13:25:48,803 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:48,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-27T13:25:48,803 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:48,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:48,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at 
sequenceid=365 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/11ed1181d99e4ba9a0f7b7705c7c3913 2024-11-27T13:25:48,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4d9f76e7839c47faa42b2e3f1b53902b is 50, key is test_row_0/C:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-27T13:25:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742366_1542 (size=12301) 2024-11-27T13:25:48,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4d9f76e7839c47faa42b2e3f1b53902b 2024-11-27T13:25:48,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7903442b30cc4536aae9d299d0d8bbfb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb 2024-11-27T13:25:48,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb, entries=200, sequenceid=365, filesize=14.4 K 2024-11-27T13:25:48,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/11ed1181d99e4ba9a0f7b7705c7c3913 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913 2024-11-27T13:25:48,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913, entries=150, sequenceid=365, filesize=12.0 K 2024-11-27T13:25:48,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4d9f76e7839c47faa42b2e3f1b53902b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b 2024-11-27T13:25:48,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b, entries=150, sequenceid=365, filesize=12.0 K 2024-11-27T13:25:48,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for da754bda7623eef518328888f8b63cf4 in 871ms, sequenceid=365, compaction requested=true 2024-11-27T13:25:48,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:48,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:48,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:48,928 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:48,929 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:48,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:48,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:48,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:48,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:48,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:48,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:48,930 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:48,930 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/73d6a1619b5d420d9f960e05a264b9e5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=39.2 K 2024-11-27T13:25:48,930 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:48,930 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:48,930 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:48,930 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2fac48e9991c4ace8f4e1a806679f858, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.8 K 2024-11-27T13:25:48,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73d6a1619b5d420d9f960e05a264b9e5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:48,931 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fac48e9991c4ace8f4e1a806679f858, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:48,931 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa71dd985b15427c88d66e31540b3a73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732713946268 2024-11-27T13:25:48,931 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d19e137cbf84477396410d788d18f99c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732713946268 2024-11-27T13:25:48,931 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 7903442b30cc4536aae9d299d0d8bbfb, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947426 2024-11-27T13:25:48,931 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 11ed1181d99e4ba9a0f7b7705c7c3913, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947432 2024-11-27T13:25:48,940 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#465 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:48,940 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7d8c8baf15234bd6aa17e9f354e695e0 is 50, key is test_row_0/B:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,942 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:48,942 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/53d9c5b1365d495e9917b0d6f3320849 is 50, key is test_row_0/A:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742367_1543 (size=13187) 2024-11-27T13:25:48,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:48,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:48,956 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:48,956 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/7d8c8baf15234bd6aa17e9f354e695e0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7d8c8baf15234bd6aa17e9f354e695e0 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:48,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:48,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/1502bacadbec47f9960bc61c65606190 is 50, key is test_row_0/A:col10/1732713948063/Put/seqid=0 2024-11-27T13:25:48,964 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 7d8c8baf15234bd6aa17e9f354e695e0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:48,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:48,964 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713948928; duration=0sec 2024-11-27T13:25:48,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:48,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:48,964 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:48,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:48,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:48,965 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:48,965 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/168daedd83b341b6993aa7e17f8c7028, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.8 K 2024-11-27T13:25:48,965 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 168daedd83b341b6993aa7e17f8c7028, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732713945131 2024-11-27T13:25:48,966 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting dae3d98de2e24de29047bfef09f039f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=336, earliestPutTs=1732713946268 2024-11-27T13:25:48,966 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d9f76e7839c47faa42b2e3f1b53902b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947432 2024-11-27T13:25:48,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742368_1544 (size=13187) 2024-11-27T13:25:48,973 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/53d9c5b1365d495e9917b0d6f3320849 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/53d9c5b1365d495e9917b0d6f3320849 2024-11-27T13:25:48,980 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 53d9c5b1365d495e9917b0d6f3320849(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:48,980 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:48,980 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713948928; duration=0sec 2024-11-27T13:25:48,980 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:48,980 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:48,991 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:48,992 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/c35cbcced3f841dfa474369025535b58 is 50, key is test_row_0/C:col10/1732713947434/Put/seqid=0 2024-11-27T13:25:48,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742369_1545 (size=12301) 2024-11-27T13:25:48,996 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/1502bacadbec47f9960bc61c65606190 2024-11-27T13:25:49,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742370_1546 (size=13187) 2024-11-27T13:25:49,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/479c965920744d169d7a0f15bd86d183 is 50, key is test_row_0/B:col10/1732713948063/Put/seqid=0 2024-11-27T13:25:49,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742371_1547 (size=12301) 2024-11-27T13:25:49,019 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/479c965920744d169d7a0f15bd86d183 2024-11-27T13:25:49,019 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/c35cbcced3f841dfa474369025535b58 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/c35cbcced3f841dfa474369025535b58 2024-11-27T13:25:49,025 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into c35cbcced3f841dfa474369025535b58(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:49,025 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,025 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713948929; duration=0sec 2024-11-27T13:25:49,025 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:49,025 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:49,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fc50c114a75a41ebb08fbd2082e246cd is 50, key is test_row_0/C:col10/1732713948063/Put/seqid=0 2024-11-27T13:25:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742372_1548 (size=12301) 2024-11-27T13:25:49,034 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fc50c114a75a41ebb08fbd2082e246cd 2024-11-27T13:25:49,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/1502bacadbec47f9960bc61c65606190 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190 2024-11-27T13:25:49,043 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190, entries=150, sequenceid=375, filesize=12.0 K 2024-11-27T13:25:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/479c965920744d169d7a0f15bd86d183 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183 2024-11-27T13:25:49,048 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183, entries=150, sequenceid=375, filesize=12.0 K 2024-11-27T13:25:49,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/fc50c114a75a41ebb08fbd2082e246cd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd 2024-11-27T13:25:49,053 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd, entries=150, sequenceid=375, filesize=12.0 K 2024-11-27T13:25:49,053 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for da754bda7623eef518328888f8b63cf4 in 97ms, sequenceid=375, compaction requested=false 2024-11-27T13:25:49,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:49,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-11-27T13:25:49,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-11-27T13:25:49,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-27T13:25:49,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 251 msec 2024-11-27T13:25:49,058 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 256 msec 2024-11-27T13:25:49,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-27T13:25:49,105 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-27T13:25:49,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:49,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-11-27T13:25:49,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:49,108 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:49,108 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:49,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:49,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:25:49,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:49,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:49,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-27T13:25:49,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:49,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/4b9f627759c0466a8063855ed01e79ea is 50, key is test_row_0/A:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742373_1549 (size=17181) 2024-11-27T13:25:49,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:49,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/4b9f627759c0466a8063855ed01e79ea 2024-11-27T13:25:49,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/8fed15a728974356816707b188ff7e0d is 50, key is test_row_0/B:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742374_1550 (size=12301) 2024-11-27T13:25:49,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/8fed15a728974356816707b188ff7e0d 2024-11-27T13:25:49,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3a05818d7f7a4421be98a2605a85e59f is 50, key is test_row_0/C:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742375_1551 (size=12301) 2024-11-27T13:25:49,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3a05818d7f7a4421be98a2605a85e59f 2024-11-27T13:25:49,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/4b9f627759c0466a8063855ed01e79ea as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea 2024-11-27T13:25:49,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea, entries=250, sequenceid=390, filesize=16.8 K 2024-11-27T13:25:49,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/8fed15a728974356816707b188ff7e0d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d 2024-11-27T13:25:49,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d, entries=150, sequenceid=390, filesize=12.0 K 2024-11-27T13:25:49,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/3a05818d7f7a4421be98a2605a85e59f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f 2024-11-27T13:25:49,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f, entries=150, sequenceid=390, filesize=12.0 K 2024-11-27T13:25:49,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=120.76 KB/123660 for da754bda7623eef518328888f8b63cf4 in 49ms, sequenceid=390, compaction requested=true 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:49,247 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:49,247 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:49,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:49,248 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:49,248 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42669 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:49,248 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:49,248 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:49,248 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,248 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:49,248 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7d8c8baf15234bd6aa17e9f354e695e0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.9 K 2024-11-27T13:25:49,248 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/53d9c5b1365d495e9917b0d6f3320849, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=41.7 K 2024-11-27T13:25:49,249 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d8c8baf15234bd6aa17e9f354e695e0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947432 2024-11-27T13:25:49,249 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53d9c5b1365d495e9917b0d6f3320849, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947432 2024-11-27T13:25:49,249 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 479c965920744d169d7a0f15bd86d183, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732713948063 2024-11-27T13:25:49,249 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fed15a728974356816707b188ff7e0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949196 2024-11-27T13:25:49,250 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1502bacadbec47f9960bc61c65606190, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732713948063 2024-11-27T13:25:49,250 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b9f627759c0466a8063855ed01e79ea, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949192 2024-11-27T13:25:49,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:49,250 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:49,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/afc0e1978a874b1f9663dd6d78b93d90 is 50, key is test_row_0/A:col10/1732713949237/Put/seqid=0 2024-11-27T13:25:49,257 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:49,257 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/396a3dba44e944c79e37850dcc6fda64 is 50, key is test_row_0/B:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:49,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-27T13:25:49,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:49,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:49,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,277 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#476 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:49,278 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f8f17552bd9d4e4dbcf64958f4a6c87a is 50, key is test_row_0/A:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742376_1552 (size=12301) 2024-11-27T13:25:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714009285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/afc0e1978a874b1f9663dd6d78b93d90 2024-11-27T13:25:49,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742377_1553 (size=13289) 2024-11-27T13:25:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714009285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714009285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,301 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/396a3dba44e944c79e37850dcc6fda64 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/396a3dba44e944c79e37850dcc6fda64 2024-11-27T13:25:49,305 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 396a3dba44e944c79e37850dcc6fda64(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:49,306 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,306 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=13, startTime=1732713949247; duration=0sec 2024-11-27T13:25:49,306 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:49,306 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:49,306 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:25:49,306 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:25:49,307 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:49,307 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,307 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/c35cbcced3f841dfa474369025535b58, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=36.9 K 2024-11-27T13:25:49,307 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c35cbcced3f841dfa474369025535b58, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732713947432 2024-11-27T13:25:49,307 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fc50c114a75a41ebb08fbd2082e246cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732713948063 2024-11-27T13:25:49,307 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a05818d7f7a4421be98a2605a85e59f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949196 2024-11-27T13:25:49,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742378_1554 (size=13289) 2024-11-27T13:25:49,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d7bd90e57f964f94b9b006af4793e606 is 50, key is test_row_0/B:col10/1732713949237/Put/seqid=0 2024-11-27T13:25:49,323 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/f8f17552bd9d4e4dbcf64958f4a6c87a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f8f17552bd9d4e4dbcf64958f4a6c87a 2024-11-27T13:25:49,329 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into f8f17552bd9d4e4dbcf64958f4a6c87a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:25:49,329 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,329 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=13, startTime=1732713949247; duration=0sec 2024-11-27T13:25:49,329 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:49,329 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:49,332 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:49,333 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4dde7468f715487ab9a2036b407d9eba is 50, key is test_row_0/C:col10/1732713949196/Put/seqid=0 2024-11-27T13:25:49,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742379_1555 (size=12301) 2024-11-27T13:25:49,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d7bd90e57f964f94b9b006af4793e606 2024-11-27T13:25:49,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742380_1556 (size=13289) 2024-11-27T13:25:49,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/289a47fa0b4747489fbd038181972921 is 50, key is test_row_0/C:col10/1732713949237/Put/seqid=0 2024-11-27T13:25:49,360 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/4dde7468f715487ab9a2036b407d9eba as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4dde7468f715487ab9a2036b407d9eba 2024-11-27T13:25:49,364 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into 4dde7468f715487ab9a2036b407d9eba(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:49,365 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,365 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=13, startTime=1732713949247; duration=0sec 2024-11-27T13:25:49,365 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:49,365 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:49,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742381_1557 (size=12301) 2024-11-27T13:25:49,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714009397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714009398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714009399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:49,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:49,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-27T13:25:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:49,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714009559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:49,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-27T13:25:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:49,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:49,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714009569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714009602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714009602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714009602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:49,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:49,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-27T13:25:49,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:49,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:49,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:49,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/289a47fa0b4747489fbd038181972921 2024-11-27T13:25:49,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/afc0e1978a874b1f9663dd6d78b93d90 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90 2024-11-27T13:25:49,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:25:49,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/d7bd90e57f964f94b9b006af4793e606 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606 2024-11-27T13:25:49,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:25:49,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/289a47fa0b4747489fbd038181972921 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921 2024-11-27T13:25:49,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:25:49,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for da754bda7623eef518328888f8b63cf4 in 538ms, sequenceid=412, compaction requested=false 2024-11-27T13:25:49,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:49,872 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
a0541979a851,32819,1732713812705 2024-11-27T13:25:49,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-27T13:25:49,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:49,872 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:49,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7846e38ee191420b91847fe5d0ee4702 is 50, key is test_row_0/A:col10/1732713949283/Put/seqid=0 2024-11-27T13:25:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742382_1558 (size=12301) 2024-11-27T13:25:49,884 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7846e38ee191420b91847fe5d0ee4702 2024-11-27T13:25:49,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/244ef7873ef84e428aaccd126bc9f70a is 50, key is test_row_0/B:col10/1732713949283/Put/seqid=0 2024-11-27T13:25:49,895 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742383_1559 (size=12301) 2024-11-27T13:25:49,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:49,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:49,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714009943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714009943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:49,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:49,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714009948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714010051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714010051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714010053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:50,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714010257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714010258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714010259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,296 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/244ef7873ef84e428aaccd126bc9f70a 2024-11-27T13:25:50,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/6b6afb4f56484e86b5653da4a463a9f5 is 50, key is test_row_0/C:col10/1732713949283/Put/seqid=0 2024-11-27T13:25:50,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742384_1560 (size=12301) 2024-11-27T13:25:50,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714010563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714010563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:50,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714010564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:50,705 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/6b6afb4f56484e86b5653da4a463a9f5 2024-11-27T13:25:50,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/7846e38ee191420b91847fe5d0ee4702 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702 2024-11-27T13:25:50,712 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702, entries=150, sequenceid=430, filesize=12.0 K 2024-11-27T13:25:50,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/244ef7873ef84e428aaccd126bc9f70a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a 2024-11-27T13:25:50,716 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a, entries=150, sequenceid=430, filesize=12.0 K 2024-11-27T13:25:50,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/6b6afb4f56484e86b5653da4a463a9f5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5 2024-11-27T13:25:50,721 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5, entries=150, sequenceid=430, filesize=12.0 K 2024-11-27T13:25:50,722 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for da754bda7623eef518328888f8b63cf4 in 849ms, sequenceid=430, compaction requested=true 2024-11-27T13:25:50,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:50,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:50,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-11-27T13:25:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-11-27T13:25:50,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-27T13:25:50,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6150 sec 2024-11-27T13:25:50,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 1.6190 sec 2024-11-27T13:25:51,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:51,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:51,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:51,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/03657b2275b246d0acac4fb45d39feaa is 50, key is test_row_0/A:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:51,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742385_1561 (size=14741) 2024-11-27T13:25:51,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714011095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714011096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714011102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714011203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714011204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714011209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-27T13:25:51,212 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-27T13:25:51,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:25:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-11-27T13:25:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:51,214 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:25:51,214 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:25:51,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:25:51,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:51,366 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:51,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:51,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714011410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714011412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714011412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/03657b2275b246d0acac4fb45d39feaa 2024-11-27T13:25:51,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/4dc3e29b7c364ceaa3e796599fa5eebd is 50, key is test_row_0/B:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742386_1562 (size=12301) 2024-11-27T13:25:51,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:51,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:51,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
as already flushing 2024-11-27T13:25:51,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45778 deadline: 1732714011580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,587 DEBUG [Thread-2071 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:25:51,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45754 deadline: 1732714011591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,597 DEBUG [Thread-2067 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., hostname=a0541979a851,32819,1732713812705, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:25:51,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:51,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:51,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:51,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:25:51,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714011714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714011717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714011717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:51,824 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:51,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:51,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:51,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/4dc3e29b7c364ceaa3e796599fa5eebd 2024-11-27T13:25:51,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/cae114581f8a420fbd2547b085c16d6a is 50, key is test_row_0/C:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:51,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742387_1563 (size=12301) 2024-11-27T13:25:51,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:51,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:51,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:51,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:51,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,131 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:52,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:52,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:52,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45790 deadline: 1732714012220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:52,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:52,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45828 deadline: 1732714012221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:52,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:25:52,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45794 deadline: 1732714012222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 2024-11-27T13:25:52,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:52,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:25:52,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:52,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/cae114581f8a420fbd2547b085c16d6a 2024-11-27T13:25:52,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/03657b2275b246d0acac4fb45d39feaa as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa 2024-11-27T13:25:52,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa, entries=200, sequenceid=453, filesize=14.4 K 2024-11-27T13:25:52,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/4dc3e29b7c364ceaa3e796599fa5eebd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd 2024-11-27T13:25:52,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd, entries=150, sequenceid=453, filesize=12.0 K 2024-11-27T13:25:52,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/cae114581f8a420fbd2547b085c16d6a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a 2024-11-27T13:25:52,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a, entries=150, sequenceid=453, filesize=12.0 K 2024-11-27T13:25:52,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for da754bda7623eef518328888f8b63cf4 in 1258ms, sequenceid=453, compaction requested=true 2024-11-27T13:25:52,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:52,334 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da754bda7623eef518328888f8b63cf4:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:25:52,334 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:52,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:52,335 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:52,335 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/B is initiating minor compaction (all files) 2024-11-27T13:25:52,335 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/B in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:52,335 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/396a3dba44e944c79e37850dcc6fda64, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=49.0 K 2024-11-27T13:25:52,335 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52632 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:52,335 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/A is initiating minor compaction (all files) 2024-11-27T13:25:52,336 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/A in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,336 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f8f17552bd9d4e4dbcf64958f4a6c87a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=51.4 K 2024-11-27T13:25:52,336 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 396a3dba44e944c79e37850dcc6fda64, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949196 2024-11-27T13:25:52,336 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8f17552bd9d4e4dbcf64958f4a6c87a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949196 2024-11-27T13:25:52,336 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d7bd90e57f964f94b9b006af4793e606, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, 
earliestPutTs=1732713949229 2024-11-27T13:25:52,336 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting afc0e1978a874b1f9663dd6d78b93d90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732713949229 2024-11-27T13:25:52,336 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 244ef7873ef84e428aaccd126bc9f70a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732713949261 2024-11-27T13:25:52,337 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7846e38ee191420b91847fe5d0ee4702, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732713949261 2024-11-27T13:25:52,337 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dc3e29b7c364ceaa3e796599fa5eebd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732713949931 2024-11-27T13:25:52,337 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03657b2275b246d0acac4fb45d39feaa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732713949931 2024-11-27T13:25:52,345 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#A#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:52,345 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/58d9c022a52947d1b772aaf372af96e2 is 50, key is test_row_0/A:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:52,345 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#B#compaction#486 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:52,346 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/80d7b96ac6a04e7797a111b97f4cd59d is 50, key is test_row_0/B:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:52,348 DEBUG [Thread-2078 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:59011 2024-11-27T13:25:52,348 DEBUG [Thread-2078 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:52,348 DEBUG [Thread-2076 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:59011 2024-11-27T13:25:52,349 DEBUG [Thread-2080 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:59011 2024-11-27T13:25:52,349 DEBUG [Thread-2080 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:52,349 DEBUG [Thread-2076 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:52,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742388_1564 (size=13425) 2024-11-27T13:25:52,350 DEBUG [Thread-2074 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:59011 2024-11-27T13:25:52,350 DEBUG [Thread-2074 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:52,350 DEBUG [Thread-2082 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:59011 2024-11-27T13:25:52,350 DEBUG [Thread-2082 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:52,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742389_1565 (size=13425) 2024-11-27T13:25:52,436 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:52,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-27T13:25:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:52,437 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:52,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:52,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/82c082506ba94972ad3c9f2cb9aaaa87 is 50, key is test_row_0/A:col10/1732713951081/Put/seqid=0 2024-11-27T13:25:52,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742390_1566 (size=12301) 2024-11-27T13:25:52,753 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/58d9c022a52947d1b772aaf372af96e2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/58d9c022a52947d1b772aaf372af96e2 2024-11-27T13:25:52,756 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/80d7b96ac6a04e7797a111b97f4cd59d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/80d7b96ac6a04e7797a111b97f4cd59d 2024-11-27T13:25:52,757 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/A of da754bda7623eef518328888f8b63cf4 into 58d9c022a52947d1b772aaf372af96e2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:52,757 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:52,757 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/A, priority=12, startTime=1732713952334; duration=0sec 2024-11-27T13:25:52,757 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:25:52,757 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:A 2024-11-27T13:25:52,757 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-27T13:25:52,758 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-27T13:25:52,758 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): da754bda7623eef518328888f8b63cf4/C is initiating minor compaction (all files) 2024-11-27T13:25:52,758 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of da754bda7623eef518328888f8b63cf4/C in TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:52,758 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4dde7468f715487ab9a2036b407d9eba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp, totalSize=49.0 K 2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dde7468f715487ab9a2036b407d9eba, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732713949196 2024-11-27T13:25:52,759 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/B of da754bda7623eef518328888f8b63cf4 into 80d7b96ac6a04e7797a111b97f4cd59d(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:52,759 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/B, priority=12, startTime=1732713952334; duration=0sec 2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 289a47fa0b4747489fbd038181972921, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732713949229 2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:B 2024-11-27T13:25:52,759 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b6afb4f56484e86b5653da4a463a9f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732713949261 2024-11-27T13:25:52,760 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cae114581f8a420fbd2547b085c16d6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1732713949931 2024-11-27T13:25:52,765 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da754bda7623eef518328888f8b63cf4#C#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:25:52,765 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/af63c720f120400a99f36d5ffe303f76 is 50, key is test_row_0/C:col10/1732713949931/Put/seqid=0 2024-11-27T13:25:52,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742391_1567 (size=13425) 2024-11-27T13:25:52,845 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/82c082506ba94972ad3c9f2cb9aaaa87 2024-11-27T13:25:52,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/185fd85529f942deb9a1297cb9065e2a is 50, key is test_row_0/B:col10/1732713951081/Put/seqid=0 2024-11-27T13:25:52,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742392_1568 (size=12301) 2024-11-27T13:25:53,172 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/af63c720f120400a99f36d5ffe303f76 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/af63c720f120400a99f36d5ffe303f76 2024-11-27T13:25:53,176 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in da754bda7623eef518328888f8b63cf4/C of da754bda7623eef518328888f8b63cf4 into af63c720f120400a99f36d5ffe303f76(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:25:53,176 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:53,176 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4., storeName=da754bda7623eef518328888f8b63cf4/C, priority=12, startTime=1732713952334; duration=0sec 2024-11-27T13:25:53,177 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:25:53,177 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da754bda7623eef518328888f8b63cf4:C 2024-11-27T13:25:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:53,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. as already flushing 2024-11-27T13:25:53,232 DEBUG [Thread-2065 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:59011 2024-11-27T13:25:53,232 DEBUG [Thread-2065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:53,236 DEBUG [Thread-2069 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:59011 2024-11-27T13:25:53,236 DEBUG [Thread-2063 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:59011 2024-11-27T13:25:53,236 DEBUG [Thread-2069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:53,236 DEBUG [Thread-2063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:53,254 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/185fd85529f942deb9a1297cb9065e2a 2024-11-27T13:25:53,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/33b3cebd5c6a413a87b2e8001ab1e814 is 50, key is test_row_0/C:col10/1732713951081/Put/seqid=0 2024-11-27T13:25:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742393_1569 (size=12301) 2024-11-27T13:25:53,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:53,662 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=466 (bloomFilter=true), 
to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/33b3cebd5c6a413a87b2e8001ab1e814 2024-11-27T13:25:53,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/82c082506ba94972ad3c9f2cb9aaaa87 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/82c082506ba94972ad3c9f2cb9aaaa87 2024-11-27T13:25:53,668 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/82c082506ba94972ad3c9f2cb9aaaa87, entries=150, sequenceid=466, filesize=12.0 K 2024-11-27T13:25:53,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/185fd85529f942deb9a1297cb9065e2a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/185fd85529f942deb9a1297cb9065e2a 2024-11-27T13:25:53,671 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/185fd85529f942deb9a1297cb9065e2a, entries=150, sequenceid=466, filesize=12.0 K 2024-11-27T13:25:53,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/33b3cebd5c6a413a87b2e8001ab1e814 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/33b3cebd5c6a413a87b2e8001ab1e814 2024-11-27T13:25:53,674 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/33b3cebd5c6a413a87b2e8001ab1e814, entries=150, sequenceid=466, filesize=12.0 K 2024-11-27T13:25:53,675 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=20.13 KB/20610 for da754bda7623eef518328888f8b63cf4 in 1238ms, sequenceid=466, compaction requested=false 2024-11-27T13:25:53,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for 
da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:53,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:53,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-11-27T13:25:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-11-27T13:25:53,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-27T13:25:53,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4610 sec 2024-11-27T13:25:53,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 2.4640 sec 2024-11-27T13:25:55,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-27T13:25:55,318 INFO [Thread-2073 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-11-27T13:25:55,612 DEBUG [Thread-2071 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:59011 2024-11-27T13:25:55,612 DEBUG [Thread-2067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:59011 2024-11-27T13:25:55,612 DEBUG [Thread-2071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:55,612 DEBUG [Thread-2067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2407 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7220 rows 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2416 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7247 rows 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2408 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7221 rows 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2405 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7214 rows 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2415 2024-11-27T13:25:55,612 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7244 rows 2024-11-27T13:25:55,612 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:25:55,612 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:59011 2024-11-27T13:25:55,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:25:55,614 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:25:55,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:25:55,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:55,618 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713955618"}]},"ts":"1732713955618"} 2024-11-27T13:25:55,620 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:25:55,622 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:25:55,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:25:55,624 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, UNASSIGN}] 2024-11-27T13:25:55,624 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, UNASSIGN 2024-11-27T13:25:55,625 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=da754bda7623eef518328888f8b63cf4, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:55,625 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:25:55,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:55,777 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:55,777 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:55,777 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:25:55,777 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing da754bda7623eef518328888f8b63cf4, disabling compactions & flushes 2024-11-27T13:25:55,777 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. after waiting 0 ms 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:55,778 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(2837): Flushing da754bda7623eef518328888f8b63cf4 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=A 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=B 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK da754bda7623eef518328888f8b63cf4, store=C 2024-11-27T13:25:55,778 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:25:55,781 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/198defa919104990942736e531c3bc28 is 50, key is test_row_0/A:col10/1732713955611/Put/seqid=0 2024-11-27T13:25:55,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742394_1570 (size=9857) 2024-11-27T13:25:55,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:56,185 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/198defa919104990942736e531c3bc28 2024-11-27T13:25:56,190 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/3de549101172486cbd3aa923d6446ca5 is 50, key is test_row_0/B:col10/1732713955611/Put/seqid=0 2024-11-27T13:25:56,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742395_1571 (size=9857) 2024-11-27T13:25:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:56,594 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 
{event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/3de549101172486cbd3aa923d6446ca5 2024-11-27T13:25:56,600 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/b35928709eaf4ebb8a9e18299b0bf8ea is 50, key is test_row_0/C:col10/1732713955611/Put/seqid=0 2024-11-27T13:25:56,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742396_1572 (size=9857) 2024-11-27T13:25:56,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:57,003 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/b35928709eaf4ebb8a9e18299b0bf8ea 2024-11-27T13:25:57,007 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/A/198defa919104990942736e531c3bc28 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/198defa919104990942736e531c3bc28 2024-11-27T13:25:57,009 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/198defa919104990942736e531c3bc28, entries=100, sequenceid=477, filesize=9.6 K 2024-11-27T13:25:57,010 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/B/3de549101172486cbd3aa923d6446ca5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3de549101172486cbd3aa923d6446ca5 2024-11-27T13:25:57,012 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3de549101172486cbd3aa923d6446ca5, entries=100, sequenceid=477, filesize=9.6 K 2024-11-27T13:25:57,013 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/.tmp/C/b35928709eaf4ebb8a9e18299b0bf8ea as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/b35928709eaf4ebb8a9e18299b0bf8ea 2024-11-27T13:25:57,015 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/b35928709eaf4ebb8a9e18299b0bf8ea, entries=100, sequenceid=477, filesize=9.6 K 2024-11-27T13:25:57,016 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for da754bda7623eef518328888f8b63cf4 in 1238ms, sequenceid=477, compaction requested=true 2024-11-27T13:25:57,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8a102f8f1b2f49d29aba2559917c9831, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/3b8ccb3c9536414599740ef181abfb66, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8ecdb2a86c874644bb6bc1189d510d53, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f293604544c64905b8fa22e88d4aa360, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a797fd040a5f426d8571bb419c045020, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/df0bc83b45de48259e5d67c4d8d09714, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/73d6a1619b5d420d9f960e05a264b9e5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/53d9c5b1365d495e9917b0d6f3320849, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f8f17552bd9d4e4dbcf64958f4a6c87a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa] to archive 2024-11-27T13:25:57,017 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:57,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8f3b11670edb4fb496c81c37ef8cf553 2024-11-27T13:25:57,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/d121b91678ed433084a5d6e39a7c8367 2024-11-27T13:25:57,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7e784ff900d6455bb098f861d8d13c09 2024-11-27T13:25:57,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/94cf7d36a3c6434bb475df12b6c74982 2024-11-27T13:25:57,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8a102f8f1b2f49d29aba2559917c9831 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8a102f8f1b2f49d29aba2559917c9831 2024-11-27T13:25:57,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/98da724df9154064855f5360c09d9392 2024-11-27T13:25:57,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/11548d77d8724a95a88f441e2ab2456b 2024-11-27T13:25:57,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/3b8ccb3c9536414599740ef181abfb66 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/3b8ccb3c9536414599740ef181abfb66 2024-11-27T13:25:57,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/ad6b948c0e56430ca11949098aa584e4 2024-11-27T13:25:57,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7781852f00474a0293450dc3669478dd 2024-11-27T13:25:57,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8ecdb2a86c874644bb6bc1189d510d53 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/8ecdb2a86c874644bb6bc1189d510d53 2024-11-27T13:25:57,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/32004ab467e748488271e67f66b7cef6 2024-11-27T13:25:57,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/adb00b5c7bcb4362838d2b7f8b2221bd 2024-11-27T13:25:57,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f293604544c64905b8fa22e88d4aa360 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f293604544c64905b8fa22e88d4aa360 2024-11-27T13:25:57,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f3dec0f5ae85417caf90f2931de70740 2024-11-27T13:25:57,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a4752ca02ee84c4e9ce7be14df60cdbb 2024-11-27T13:25:57,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a797fd040a5f426d8571bb419c045020 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/a797fd040a5f426d8571bb419c045020 2024-11-27T13:25:57,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/e9820b62f3d240eda3aac217909effdb 2024-11-27T13:25:57,033 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/df0bc83b45de48259e5d67c4d8d09714 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/df0bc83b45de48259e5d67c4d8d09714 2024-11-27T13:25:57,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/eacfbc813dc14131bc32a7af9f266c65 2024-11-27T13:25:57,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/57f451d49b864fe4ae76caba92d61c88 2024-11-27T13:25:57,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/18e2751929be4d9daa5ac75a57b8d255 2024-11-27T13:25:57,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7cd1cafb6aaf4847a97638109e764285 2024-11-27T13:25:57,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/73d6a1619b5d420d9f960e05a264b9e5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/73d6a1619b5d420d9f960e05a264b9e5 2024-11-27T13:25:57,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/aa71dd985b15427c88d66e31540b3a73 2024-11-27T13:25:57,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7903442b30cc4536aae9d299d0d8bbfb 2024-11-27T13:25:57,039 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/53d9c5b1365d495e9917b0d6f3320849 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/53d9c5b1365d495e9917b0d6f3320849 2024-11-27T13:25:57,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/1502bacadbec47f9960bc61c65606190 2024-11-27T13:25:57,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/4b9f627759c0466a8063855ed01e79ea 2024-11-27T13:25:57,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f8f17552bd9d4e4dbcf64958f4a6c87a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/f8f17552bd9d4e4dbcf64958f4a6c87a 2024-11-27T13:25:57,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/afc0e1978a874b1f9663dd6d78b93d90 2024-11-27T13:25:57,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/7846e38ee191420b91847fe5d0ee4702 2024-11-27T13:25:57,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/03657b2275b246d0acac4fb45d39feaa 2024-11-27T13:25:57,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3b2a3b471ddd4aaf89aaf6aaf268569b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/090cd310ac2a4d41b71987c06364efe8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b1510c9258e849f7a6f9f7d891552c80, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/cbf36e3def174aeab27608bc72716685, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/43efcd83a6a44356b8c79046fdde5b14, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed2b9ac2ad724571bee16ebd72c655f2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2fac48e9991c4ace8f4e1a806679f858, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7d8c8baf15234bd6aa17e9f354e695e0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/396a3dba44e944c79e37850dcc6fda64, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd] to archive 2024-11-27T13:25:57,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:57,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/24660acf42de478f80cfb44b549bcefd 2024-11-27T13:25:57,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f4e9a2c60fcb4e539ef7a4873645e2aa 2024-11-27T13:25:57,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b09f629385b449588e7fa6b2ae9ee3c8 2024-11-27T13:25:57,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3b2a3b471ddd4aaf89aaf6aaf268569b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3b2a3b471ddd4aaf89aaf6aaf268569b 2024-11-27T13:25:57,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/18c5ac84bdb14df9982e36fd8cb7cf91 2024-11-27T13:25:57,058 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7610e4c600e2403d8a279df167e91a6a 2024-11-27T13:25:57,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/090cd310ac2a4d41b71987c06364efe8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/090cd310ac2a4d41b71987c06364efe8 2024-11-27T13:25:57,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed50d1c6955241b48168545b3f963b70 2024-11-27T13:25:57,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/917df64a16e24e619230198607c9a4d4 2024-11-27T13:25:57,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b1510c9258e849f7a6f9f7d891552c80 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/b1510c9258e849f7a6f9f7d891552c80 2024-11-27T13:25:57,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/405aad971a664660ba153fff03c3b8a2 2024-11-27T13:25:57,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2ed429ceb96a43dc9365fd44d931a3f8 2024-11-27T13:25:57,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/cbf36e3def174aeab27608bc72716685 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/cbf36e3def174aeab27608bc72716685 2024-11-27T13:25:57,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/79501eb0b2134f48baacae7f06cce541 2024-11-27T13:25:57,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/c782c7713fcd4bc0933c19574f6bdc5a 2024-11-27T13:25:57,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/43efcd83a6a44356b8c79046fdde5b14 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/43efcd83a6a44356b8c79046fdde5b14 2024-11-27T13:25:57,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/f5e4440499e1484299ac4dc3e6e45563 2024-11-27T13:25:57,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/73ea7c9b65e44052b2b8748f0790afdb 2024-11-27T13:25:57,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed2b9ac2ad724571bee16ebd72c655f2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/ed2b9ac2ad724571bee16ebd72c655f2 2024-11-27T13:25:57,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/286e5a8fb58c404188d32a20eda9e71c 2024-11-27T13:25:57,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/0193dcf8935241b883ff8e4c238800ad 2024-11-27T13:25:57,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7754394acfab48abaf051fc0d67880c5 2024-11-27T13:25:57,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2fac48e9991c4ace8f4e1a806679f858 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2fac48e9991c4ace8f4e1a806679f858 2024-11-27T13:25:57,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/2a4384a2ac6048428f8171d64d3306a8 2024-11-27T13:25:57,072 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d19e137cbf84477396410d788d18f99c 2024-11-27T13:25:57,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7d8c8baf15234bd6aa17e9f354e695e0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/7d8c8baf15234bd6aa17e9f354e695e0 2024-11-27T13:25:57,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/11ed1181d99e4ba9a0f7b7705c7c3913 2024-11-27T13:25:57,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/479c965920744d169d7a0f15bd86d183 2024-11-27T13:25:57,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/396a3dba44e944c79e37850dcc6fda64 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/396a3dba44e944c79e37850dcc6fda64 2024-11-27T13:25:57,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/8fed15a728974356816707b188ff7e0d 2024-11-27T13:25:57,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/d7bd90e57f964f94b9b006af4793e606 2024-11-27T13:25:57,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/244ef7873ef84e428aaccd126bc9f70a 2024-11-27T13:25:57,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/4dc3e29b7c364ceaa3e796599fa5eebd 2024-11-27T13:25:57,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6cc63a98d77a4ed69e4b996cb6fc3df2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/074bbe8281c5440ea1b7031e3c7ebf78, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/216abe7698d548f5a9eba5ee03c57d36, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/18719e4d3f434e5c8a322acefd98ba73, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/2b47830804ec4399bf90f093a13e2c7b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/498e5210125e4cd2b10d7d09f3e1e793, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/168daedd83b341b6993aa7e17f8c7028, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/c35cbcced3f841dfa474369025535b58, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4dde7468f715487ab9a2036b407d9eba, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a] to archive 2024-11-27T13:25:57,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:25:57,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/5d1e2e7795994fc5aaf0f7eb88583687 2024-11-27T13:25:57,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/431712205d90403dbe501e2cf5fc2b97 2024-11-27T13:25:57,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/7647a390bdb64047b169f346e0ab2c2c 2024-11-27T13:25:57,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6cc63a98d77a4ed69e4b996cb6fc3df2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6cc63a98d77a4ed69e4b996cb6fc3df2 2024-11-27T13:25:57,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/21c01a72f4db4985ab28f1c5bdfce47b 2024-11-27T13:25:57,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/db6c0fe036214538a998262e6ed950d5 2024-11-27T13:25:57,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/074bbe8281c5440ea1b7031e3c7ebf78 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/074bbe8281c5440ea1b7031e3c7ebf78 2024-11-27T13:25:57,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/17345be7287b46948c29fa20cd83637b 2024-11-27T13:25:57,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/96bc3f1f5cbf4f3fa6c5fab7583f5f6f 2024-11-27T13:25:57,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/216abe7698d548f5a9eba5ee03c57d36 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/216abe7698d548f5a9eba5ee03c57d36 2024-11-27T13:25:57,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e7be03aede6c4baf8335e7d52bcf87b7 2024-11-27T13:25:57,091 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fe761e1aa765429f914efd432f659ac6 2024-11-27T13:25:57,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/18719e4d3f434e5c8a322acefd98ba73 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/18719e4d3f434e5c8a322acefd98ba73 2024-11-27T13:25:57,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dde94669f4ad4a399f56701b762166de 2024-11-27T13:25:57,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/8facae7e46e14f4f978fa8ea7e3c38e8 2024-11-27T13:25:57,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/2b47830804ec4399bf90f093a13e2c7b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/2b47830804ec4399bf90f093a13e2c7b 2024-11-27T13:25:57,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3f029e4313be4147aaa6c3449728b7d1 2024-11-27T13:25:57,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/aa777b02cf704b10ba522a81ee487167 2024-11-27T13:25:57,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/498e5210125e4cd2b10d7d09f3e1e793 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/498e5210125e4cd2b10d7d09f3e1e793 2024-11-27T13:25:57,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/d974c4d5c5fe4837971487b972686e98 2024-11-27T13:25:57,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4ff1aac65d1a4860ad70101a660d8cd6 2024-11-27T13:25:57,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/675ec0932ebf442a99de8bc952246ab3 2024-11-27T13:25:57,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/168daedd83b341b6993aa7e17f8c7028 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/168daedd83b341b6993aa7e17f8c7028 2024-11-27T13:25:57,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/e4e9aae6927d4518b3ff10caf53bc069 2024-11-27T13:25:57,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/dae3d98de2e24de29047bfef09f039f0 2024-11-27T13:25:57,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/c35cbcced3f841dfa474369025535b58 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/c35cbcced3f841dfa474369025535b58 2024-11-27T13:25:57,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4d9f76e7839c47faa42b2e3f1b53902b 2024-11-27T13:25:57,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/fc50c114a75a41ebb08fbd2082e246cd 2024-11-27T13:25:57,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4dde7468f715487ab9a2036b407d9eba to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/4dde7468f715487ab9a2036b407d9eba 2024-11-27T13:25:57,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/3a05818d7f7a4421be98a2605a85e59f 2024-11-27T13:25:57,107 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/289a47fa0b4747489fbd038181972921 2024-11-27T13:25:57,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/6b6afb4f56484e86b5653da4a463a9f5 2024-11-27T13:25:57,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/cae114581f8a420fbd2547b085c16d6a 2024-11-27T13:25:57,112 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/recovered.edits/480.seqid, newMaxSeqId=480, maxSeqId=1 2024-11-27T13:25:57,113 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4. 
2024-11-27T13:25:57,113 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for da754bda7623eef518328888f8b63cf4: 2024-11-27T13:25:57,114 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:57,114 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=da754bda7623eef518328888f8b63cf4, regionState=CLOSED 2024-11-27T13:25:57,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-27T13:25:57,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure da754bda7623eef518328888f8b63cf4, server=a0541979a851,32819,1732713812705 in 1.4900 sec 2024-11-27T13:25:57,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-27T13:25:57,117 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=da754bda7623eef518328888f8b63cf4, UNASSIGN in 1.4920 sec 2024-11-27T13:25:57,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-27T13:25:57,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4940 sec 2024-11-27T13:25:57,119 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713957119"}]},"ts":"1732713957119"} 2024-11-27T13:25:57,120 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:25:57,122 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:25:57,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5070 sec 2024-11-27T13:25:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-27T13:25:57,722 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-11-27T13:25:57,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:25:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,724 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-11-27T13:25:57,724 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=162, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,727 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:57,728 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/recovered.edits] 2024-11-27T13:25:57,730 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/198defa919104990942736e531c3bc28 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/198defa919104990942736e531c3bc28 2024-11-27T13:25:57,731 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/58d9c022a52947d1b772aaf372af96e2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/58d9c022a52947d1b772aaf372af96e2 2024-11-27T13:25:57,732 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/82c082506ba94972ad3c9f2cb9aaaa87 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/A/82c082506ba94972ad3c9f2cb9aaaa87 2024-11-27T13:25:57,734 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/185fd85529f942deb9a1297cb9065e2a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/185fd85529f942deb9a1297cb9065e2a 2024-11-27T13:25:57,734 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3de549101172486cbd3aa923d6446ca5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/3de549101172486cbd3aa923d6446ca5 
2024-11-27T13:25:57,735 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/80d7b96ac6a04e7797a111b97f4cd59d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/B/80d7b96ac6a04e7797a111b97f4cd59d 2024-11-27T13:25:57,737 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/33b3cebd5c6a413a87b2e8001ab1e814 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/33b3cebd5c6a413a87b2e8001ab1e814 2024-11-27T13:25:57,737 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/af63c720f120400a99f36d5ffe303f76 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/af63c720f120400a99f36d5ffe303f76 2024-11-27T13:25:57,740 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/b35928709eaf4ebb8a9e18299b0bf8ea to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/C/b35928709eaf4ebb8a9e18299b0bf8ea 2024-11-27T13:25:57,742 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/recovered.edits/480.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4/recovered.edits/480.seqid 2024-11-27T13:25:57,742 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/da754bda7623eef518328888f8b63cf4 2024-11-27T13:25:57,742 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:25:57,744 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=162, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,745 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:25:57,747 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
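Note on the archiving above: the DeleteTableProcedure does not delete the store files outright; HFileArchiver moves them under the cluster's archive directory. A minimal sketch, assuming the same hdfs://localhost:42217 NameNode and archive layout shown in these log lines, of how one could list what was kept after the table was dropped; only the paths and table name come from the log, the class and loop structure are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.net.URI;

    public class ListArchivedFiles {
      public static void main(String[] args) throws Exception {
        // NameNode address and archive path copied from the log above; adjust for another cluster.
        URI namenode = URI.create("hdfs://localhost:42217");
        Path archived = new Path("/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/"
            + "archive/data/default/TestAcidGuarantees");
        try (FileSystem fs = FileSystem.get(namenode, new Configuration())) {
          // Layout is archive/.../<table>/<region>/<family>/<hfile>; walk the three levels and print each file.
          for (FileStatus region : fs.listStatus(archived)) {
            for (FileStatus family : fs.listStatus(region.getPath())) {
              for (FileStatus hfile : fs.listStatus(family.getPath())) {
                System.out.println(hfile.getPath() + " (" + hfile.getLen() + " bytes)");
              }
            }
          }
        }
      }
    }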
2024-11-27T13:25:57,748 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=162, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,748 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T13:25:57,748 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713957748"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:57,749 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:25:57,749 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => da754bda7623eef518328888f8b63cf4, NAME => 'TestAcidGuarantees,,1732713930181.da754bda7623eef518328888f8b63cf4.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:25:57,749 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-27T13:25:57,749 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713957749"}]},"ts":"9223372036854775807"} 2024-11-27T13:25:57,751 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:25:57,752 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=162, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-11-27T13:25:57,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-11-27T13:25:57,825 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 162 completed 2024-11-27T13:25:57,835 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 241), OpenFileDescriptor=453 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=424 (was 455), ProcessCount=11 (was 11), AvailableMemoryMB=4070 (was 4111) 2024-11-27T13:25:57,843 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=424, ProcessCount=11, AvailableMemoryMB=4070 2024-11-27T13:25:57,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
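The "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines above are the client-side view of the DisableTableProcedure and DeleteTableProcedure chains (procIds 158 and 162). A hedged sketch of the equivalent calls through the public Admin API; the connection configuration is illustrative and not taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; each call blocks until the
          // corresponding master procedure finishes, as procIds 158 and 162 do above.
          if (admin.tableExists(table)) {
            admin.disableTable(table);
            admin.deleteTable(table);
          }
        }
      }
    }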
2024-11-27T13:25:57,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:25:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:57,846 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T13:25:57,846 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:57,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 163 2024-11-27T13:25:57,847 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T13:25:57,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:57,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742397_1573 (size=963) 2024-11-27T13:25:57,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:58,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:58,253 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea 2024-11-27T13:25:58,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742398_1574 (size=53) 2024-11-27T13:25:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:58,658 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:58,658 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a5bdef3839c25f6b6634d128aa14c12e, disabling compactions & flushes 2024-11-27T13:25:58,658 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:25:58,658 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:25:58,658 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. after waiting 0 ms 2024-11-27T13:25:58,658 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:25:58,658 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
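The table being created here carries the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three single-version column families A, B and C with 64 KB blocks. A rough equivalent using the HBase 2.x descriptor builders, as a sketch: the attribute key, family names, VERSIONS and BLOCKSIZE values are taken from the logged descriptor, while the connection setup and class name are illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata seen in the log: ADAPTIVE in-memory compaction for the memstore.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());
        }
      }
    }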
2024-11-27T13:25:58,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:25:58,659 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T13:25:58,660 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732713958659"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732713958659"}]},"ts":"1732713958659"} 2024-11-27T13:25:58,660 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-27T13:25:58,661 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T13:25:58,661 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713958661"}]},"ts":"1732713958661"} 2024-11-27T13:25:58,662 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-27T13:25:58,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, ASSIGN}] 2024-11-27T13:25:58,666 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, ASSIGN 2024-11-27T13:25:58,667 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, ASSIGN; state=OFFLINE, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=false 2024-11-27T13:25:58,817 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:58,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:25:58,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:58,970 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:25:58,972 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:25:58,972 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:25:58,973 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,973 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:25:58,973 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,973 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,974 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,975 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:58,975 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName A 2024-11-27T13:25:58,975 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:58,976 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:58,976 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,977 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:58,977 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName B 2024-11-27T13:25:58,977 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:58,977 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:58,977 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,978 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:25:58,978 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName C 2024-11-27T13:25:58,978 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:25:58,978 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:25:58,978 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:25:58,979 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,979 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,980 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:25:58,981 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:25:58,982 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T13:25:58,983 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened a5bdef3839c25f6b6634d128aa14c12e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66094972, jitterRate=-0.015108168125152588}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:25:58,983 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:25:58,984 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., pid=165, masterSystemTime=1732713958970 2024-11-27T13:25:58,985 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:25:58,985 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
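During region open, each family's store is backed by a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, which is the table attribute above taking effect. For comparison only, the same policy can also be requested per column family rather than per table; a small sketch under that assumption (family 'A' as in the log):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamilySketch {
  public static void main(String[] args) {
    // Ask for ADAPTIVE in-memory compaction on one family only; the region server
    // then creates a CompactingMemStore for that store, matching the
    // "Store=A ... compactor=ADAPTIVE" lines above.
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
    System.out.println(familyA);
  }
}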
2024-11-27T13:25:58,985 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=OPEN, openSeqNum=2, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:25:58,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-11-27T13:25:58,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 in 168 msec 2024-11-27T13:25:58,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-27T13:25:58,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, ASSIGN in 321 msec 2024-11-27T13:25:58,989 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T13:25:58,989 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713958989"}]},"ts":"1732713958989"} 2024-11-27T13:25:58,989 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-27T13:25:58,992 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T13:25:58,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-27T13:25:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-27T13:25:59,951 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-27T13:25:59,952 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-27T13:25:59,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:25:59,960 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:25:59,961 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:25:59,962 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T13:25:59,963 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38714, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T13:25:59,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-27T13:25:59,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T13:25:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=166, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-27T13:25:59,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742399_1575 (size=999) 2024-11-27T13:26:00,375 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-27T13:26:00,375 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-27T13:26:00,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:26:00,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, REOPEN/MOVE}] 2024-11-27T13:26:00,378 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, REOPEN/MOVE 2024-11-27T13:26:00,379 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,380 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:26:00,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE; CloseRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:26:00,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:00,532 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(124): Close a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,532 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:26:00,532 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1681): Closing a5bdef3839c25f6b6634d128aa14c12e, disabling compactions & flushes 2024-11-27T13:26:00,532 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:00,532 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:00,532 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. after waiting 0 ms 2024-11-27T13:26:00,532 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
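The modify-table request at 13:25:59,964 turns family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the ModifyTableProcedure then closes and reopens the region, which is the REOPEN/MOVE sequence running here. A minimal, hedged sketch of making that schema change through the Admin API (connection setup assumed; the test drives this through its own utilities rather than this exact call):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Replace family A's descriptor with a MOB-enabled one: cells whose value is
      // larger than 4 bytes are written to MOB files instead of ordinary HFiles.
      admin.modifyColumnFamily(TableName.valueOf("TestAcidGuarantees"),
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMaxVersions(1)
              .setMobEnabled(true)    // IS_MOB => 'true'
              .setMobThreshold(4L)    // MOB_THRESHOLD => '4'
              .build());
      // As in the log, the master reopens the affected region before the new
      // schema becomes visible to clients.
    }
  }
}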
2024-11-27T13:26:00,535 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-27T13:26:00,536 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:00,536 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1635): Region close journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:00,536 WARN [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegionServer(3786): Not adding moved region record: a5bdef3839c25f6b6634d128aa14c12e to self. 2024-11-27T13:26:00,537 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(170): Closed a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=CLOSED 2024-11-27T13:26:00,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-11-27T13:26:00,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; CloseRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 in 158 msec 2024-11-27T13:26:00,540 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, REOPEN/MOVE; state=CLOSED, location=a0541979a851,32819,1732713812705; forceNewPlan=false, retain=true 2024-11-27T13:26:00,690 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=OPENING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=168, state=RUNNABLE; OpenRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:26:00,843 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:00,845 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:00,845 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7285): Opening region: {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} 2024-11-27T13:26:00,846 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,846 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T13:26:00,846 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7327): checking encryption for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,846 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7330): checking classloading for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,847 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,847 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:26:00,848 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName A 2024-11-27T13:26:00,848 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:00,849 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:26:00,849 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,850 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:26:00,850 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName B 2024-11-27T13:26:00,850 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:00,850 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:26:00,850 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,851 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-27T13:26:00,851 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a5bdef3839c25f6b6634d128aa14c12e columnFamilyName C 2024-11-27T13:26:00,851 DEBUG [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:00,851 INFO [StoreOpener-a5bdef3839c25f6b6634d128aa14c12e-1 {}] regionserver.HStore(327): Store=a5bdef3839c25f6b6634d128aa14c12e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T13:26:00,851 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:00,852 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,853 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,854 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T13:26:00,855 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1085): writing seq id for a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,855 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1102): Opened a5bdef3839c25f6b6634d128aa14c12e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67917253, jitterRate=0.012045934796333313}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T13:26:00,856 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1001): Region open journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:00,856 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., pid=170, masterSystemTime=1732713960842 2024-11-27T13:26:00,858 DEBUG [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:00,858 INFO [RS_OPEN_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:00,858 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=OPEN, openSeqNum=5, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=168 2024-11-27T13:26:00,860 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=168, state=SUCCESS; OpenRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 in 168 msec 2024-11-27T13:26:00,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-27T13:26:00,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, REOPEN/MOVE in 482 msec 2024-11-27T13:26:00,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-11-27T13:26:00,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-11-27T13:26:00,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 897 msec 2024-11-27T13:26:00,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=166 2024-11-27T13:26:00,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-11-27T13:26:00,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,869 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-11-27T13:26:00,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,872 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-11-27T13:26:00,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,876 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 
to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-27T13:26:00,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,879 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-27T13:26:00,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,882 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-11-27T13:26:00,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,886 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-11-27T13:26:00,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,890 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-11-27T13:26:00,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,894 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-11-27T13:26:00,898 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,898 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:59011 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-11-27T13:26:00,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T13:26:00,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:00,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-27T13:26:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:00,908 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:00,908 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:00,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:00,912 DEBUG [hconnection-0x4dcc327-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,912 DEBUG [hconnection-0x3e07a985-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,912 DEBUG [hconnection-0x4f9c4805-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,913 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,913 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,913 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,920 DEBUG [hconnection-0x5c5a71ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,921 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,927 DEBUG [hconnection-0x27ab1bec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,928 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,929 DEBUG [hconnection-0x33c3d672-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,930 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,930 DEBUG [hconnection-0x5dbe7c73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,930 DEBUG [hconnection-0x31c2e622-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,931 DEBUG [hconnection-0x12d55b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,932 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,932 DEBUG [hconnection-0x6321b39-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T13:26:00,932 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,932 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,933 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T13:26:00,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:26:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:00,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:00,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:00,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4533) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4953) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4947) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4943) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3233) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714020960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714020958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714020962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714020963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714020963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:00,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411276f2e660bc9234b5f910dfa6d299cfe64_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_1/A:col10/1732713960940/Put/seqid=0 2024-11-27T13:26:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742400_1576 (size=9714) 2024-11-27T13:26:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:01,053 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T13:26:01,060 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714021064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714021064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714021067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714021070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714021070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:01,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714021266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714021266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714021270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714021273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714021273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,365 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,405 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:01,409 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411276f2e660bc9234b5f910dfa6d299cfe64_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276f2e660bc9234b5f910dfa6d299cfe64_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:01,410 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/20d622612aee4ba688b7fd28638c915f, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:01,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/20d622612aee4ba688b7fd28638c915f is 175, key is test_row_1/A:col10/1732713960940/Put/seqid=0 2024-11-27T13:26:01,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742401_1577 (size=22361) 2024-11-27T13:26:01,415 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/20d622612aee4ba688b7fd28638c915f 2024-11-27T13:26:01,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/59b470302d5241df8116a67e8c4c9f1f is 50, key is test_row_1/B:col10/1732713960940/Put/seqid=0 2024-11-27T13:26:01,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742402_1578 (size=9657) 2024-11-27T13:26:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:01,519 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:01,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714021568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714021569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714021573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714021577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:01,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714021577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:01,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:01,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:01,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/59b470302d5241df8116a67e8c4c9f1f 2024-11-27T13:26:01,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/33f69d7cfc9543d593c0c8c3953fbe05 is 50, key is test_row_1/C:col10/1732713960940/Put/seqid=0 2024-11-27T13:26:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742403_1579 (size=9657) 2024-11-27T13:26:01,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/33f69d7cfc9543d593c0c8c3953fbe05 2024-11-27T13:26:01,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/20d622612aee4ba688b7fd28638c915f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f 2024-11-27T13:26:01,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f, entries=100, sequenceid=16, filesize=21.8 K 2024-11-27T13:26:01,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/59b470302d5241df8116a67e8c4c9f1f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f 2024-11-27T13:26:01,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f, entries=100, sequenceid=16, filesize=9.4 K 2024-11-27T13:26:01,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/33f69d7cfc9543d593c0c8c3953fbe05 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05 2024-11-27T13:26:01,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05, entries=100, sequenceid=16, filesize=9.4 K 2024-11-27T13:26:01,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a5bdef3839c25f6b6634d128aa14c12e in 953ms, sequenceid=16, compaction requested=false 2024-11-27T13:26:01,894 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-27T13:26:01,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:01,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:01,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-27T13:26:01,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:01,978 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:01,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f5afc0bbfa3f4f34a824f9b1733dce0e_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713960956/Put/seqid=0 2024-11-27T13:26:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to 
blk_1073742404_1580 (size=12154) 2024-11-27T13:26:02,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:02,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:02,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714022079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714022079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714022081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714022081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714022082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714022182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714022184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714022186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714022386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714022387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714022389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:02,403 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f5afc0bbfa3f4f34a824f9b1733dce0e_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5afc0bbfa3f4f34a824f9b1733dce0e_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:02,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/13a72d2ba4584301b814984ce224654f, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:02,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/13a72d2ba4584301b814984ce224654f is 175, key is test_row_0/A:col10/1732713960956/Put/seqid=0 2024-11-27T13:26:02,408 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742405_1581 (size=30955) 2024-11-27T13:26:02,635 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T13:26:02,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714022689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714022689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:02,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714022692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:02,809 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/13a72d2ba4584301b814984ce224654f 2024-11-27T13:26:02,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9d772179952f4180bd79f955711ba406 is 50, key is test_row_0/B:col10/1732713960956/Put/seqid=0 2024-11-27T13:26:02,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742406_1582 (size=12001) 2024-11-27T13:26:02,826 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9d772179952f4180bd79f955711ba406 2024-11-27T13:26:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/68bde37ea3294453aa4e5127dcc3e612 is 50, key is test_row_0/C:col10/1732713960956/Put/seqid=0 2024-11-27T13:26:02,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742407_1583 (size=12001) 2024-11-27T13:26:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:03,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:03,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714023088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:03,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:03,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714023092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:03,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714023192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:03,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714023194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:03,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714023196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:03,237 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/68bde37ea3294453aa4e5127dcc3e612 2024-11-27T13:26:03,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/13a72d2ba4584301b814984ce224654f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f 2024-11-27T13:26:03,244 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f, entries=150, sequenceid=41, filesize=30.2 K 2024-11-27T13:26:03,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9d772179952f4180bd79f955711ba406 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406 2024-11-27T13:26:03,248 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T13:26:03,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/68bde37ea3294453aa4e5127dcc3e612 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612 2024-11-27T13:26:03,252 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612, entries=150, sequenceid=41, filesize=11.7 K 2024-11-27T13:26:03,252 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for a5bdef3839c25f6b6634d128aa14c12e in 1274ms, sequenceid=41, compaction requested=false 2024-11-27T13:26:03,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:03,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:03,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-27T13:26:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-27T13:26:03,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-27T13:26:03,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3520 sec 2024-11-27T13:26:03,263 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.3550 sec 2024-11-27T13:26:04,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:04,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:04,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:04,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c0f3b6bd08ef4f33a1b0f73f5aa675af_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:04,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742408_1584 (size=12154) 2024-11-27T13:26:04,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714024257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714024257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714024258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714024360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714024360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714024360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714024563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714024563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714024564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,615 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:04,619 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c0f3b6bd08ef4f33a1b0f73f5aa675af_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c0f3b6bd08ef4f33a1b0f73f5aa675af_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:04,620 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5b2970d7713e439c8a3e3546492eea47, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:04,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5b2970d7713e439c8a3e3546492eea47 is 175, key is test_row_0/A:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:04,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742409_1585 (size=30955) 2024-11-27T13:26:04,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714024867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714024867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:04,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:04,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714024868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-27T13:26:05,012 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-27T13:26:05,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:05,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-27T13:26:05,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-27T13:26:05,015 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:05,015 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:05,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:05,025 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5b2970d7713e439c8a3e3546492eea47 2024-11-27T13:26:05,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/fc3693937fb14469aca07e3ad92afdcf is 50, key is test_row_0/B:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:05,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742410_1586 (size=12001) 
2024-11-27T13:26:05,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:05,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714025094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,096 DEBUG [Thread-2537 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:05,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:05,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714025108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,109 DEBUG [Thread-2541 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see 
https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:05,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-27T13:26:05,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:05,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-27T13:26:05,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:05,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:05,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714025369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714025372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714025373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:05,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/fc3693937fb14469aca07e3ad92afdcf 2024-11-27T13:26:05,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/97cd6f8e97e94a4f866ab5015f8af0b1 is 50, key is test_row_0/C:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742411_1587 (size=12001) 2024-11-27T13:26:05,475 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:05,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:05,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-27T13:26:05,628 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:05,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:05,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:05,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/97cd6f8e97e94a4f866ab5015f8af0b1 2024-11-27T13:26:05,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5b2970d7713e439c8a3e3546492eea47 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47 2024-11-27T13:26:05,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47, entries=150, sequenceid=54, filesize=30.2 K 2024-11-27T13:26:05,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/fc3693937fb14469aca07e3ad92afdcf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf 2024-11-27T13:26:05,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-27T13:26:05,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/97cd6f8e97e94a4f866ab5015f8af0b1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1 2024-11-27T13:26:05,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1, entries=150, sequenceid=54, filesize=11.7 K 2024-11-27T13:26:05,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a5bdef3839c25f6b6634d128aa14c12e in 1664ms, sequenceid=54, compaction requested=true 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:05,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:05,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:05,868 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:05,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:05,869 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:05,870 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in 
TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,870 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=32.9 K 2024-11-27T13:26:05,870 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84271 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:05,870 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:05,870 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,870 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=82.3 K 2024-11-27T13:26:05,870 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:05,870 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47] 2024-11-27T13:26:05,871 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 59b470302d5241df8116a67e8c4c9f1f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713960939 2024-11-27T13:26:05,871 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20d622612aee4ba688b7fd28638c915f, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713960939 2024-11-27T13:26:05,871 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d772179952f4180bd79f955711ba406, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713960956 2024-11-27T13:26:05,871 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13a72d2ba4584301b814984ce224654f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713960956 2024-11-27T13:26:05,871 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fc3693937fb14469aca07e3ad92afdcf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078 2024-11-27T13:26:05,872 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b2970d7713e439c8a3e3546492eea47, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078 2024-11-27T13:26:05,878 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:05,881 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#505 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:05,881 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/f399f79cbb1e496c85c63d639d2e9557 is 50, key is test_row_0/B:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:05,901 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127f17080895b914e34addacb7c44583a47_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:05,903 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127f17080895b914e34addacb7c44583a47_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:05,903 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f17080895b914e34addacb7c44583a47_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:05,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742412_1588 (size=12104) 2024-11-27T13:26:05,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742413_1589 (size=4469) 2024-11-27T13:26:05,927 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#504 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:05,928 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/bea466d98d3440a0832df43701bbb117 is 175, key is test_row_0/A:col10/1732713962081/Put/seqid=0 2024-11-27T13:26:05,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742414_1590 (size=31058) 2024-11-27T13:26:05,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:05,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-27T13:26:05,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:05,935 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:05,941 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/bea466d98d3440a0832df43701bbb117 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117 2024-11-27T13:26:05,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ecb0ac0642734ca689e08de06a8bbaee_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713964256/Put/seqid=0 2024-11-27T13:26:05,948 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into bea466d98d3440a0832df43701bbb117(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:05,948 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:05,948 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713965868; duration=0sec
2024-11-27T13:26:05,948 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-27T13:26:05,948 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A
2024-11-27T13:26:05,948 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T13:26:05,950 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T13:26:05,950 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files)
2024-11-27T13:26:05,950 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.
2024-11-27T13:26:05,950 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=32.9 K
2024-11-27T13:26:05,950 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33f69d7cfc9543d593c0c8c3953fbe05, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732713960939
2024-11-27T13:26:05,950 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68bde37ea3294453aa4e5127dcc3e612, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732713960956
2024-11-27T13:26:05,951 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97cd6f8e97e94a4f866ab5015f8af0b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078
2024-11-27T13:26:05,957 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#507 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T13:26:05,957 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe is 50, key is test_row_0/C:col10/1732713962081/Put/seqid=0
2024-11-27T13:26:05,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742415_1591 (size=12154)
2024-11-27T13:26:05,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:26:05,964 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127ecb0ac0642734ca689e08de06a8bbaee_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ecb0ac0642734ca689e08de06a8bbaee_a5bdef3839c25f6b6634d128aa14c12e
2024-11-27T13:26:05,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e2ff89c33652428792c90c6aca736085, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e]
2024-11-27T13:26:05,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e2ff89c33652428792c90c6aca736085 is 175, key is test_row_0/A:col10/1732713964256/Put/seqid=0
2024-11-27T13:26:05,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742416_1592 (size=12104)
2024-11-27T13:26:05,985 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe
2024-11-27T13:26:05,990 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into 60c5bd483ccf4f11a4e7a4f49d3c0bfe(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T13:26:05,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:05,990 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713965868; duration=0sec
2024-11-27T13:26:05,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T13:26:05,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C
2024-11-27T13:26:05,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742417_1593 (size=30955)
2024-11-27T13:26:05,997 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e2ff89c33652428792c90c6aca736085
2024-11-27T13:26:06,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/c3557fd170ed4c66a273d60955c0eb8f is 50, key is test_row_0/B:col10/1732713964256/Put/seqid=0
2024-11-27T13:26:06,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742418_1594 (size=12001)
2024-11-27T13:26:06,010 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/c3557fd170ed4c66a273d60955c0eb8f
2024-11-27T13:26:06,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/72a202af7be940b6b836d78a33c48d06 is 50, key is test_row_0/C:col10/1732713964256/Put/seqid=0
2024-11-27T13:26:06,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742419_1595 (size=12001)
2024-11-27T13:26:06,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true),
to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/72a202af7be940b6b836d78a33c48d06 2024-11-27T13:26:06,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e2ff89c33652428792c90c6aca736085 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085 2024-11-27T13:26:06,030 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085, entries=150, sequenceid=77, filesize=30.2 K 2024-11-27T13:26:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/c3557fd170ed4c66a273d60955c0eb8f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f 2024-11-27T13:26:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,034 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T13:26:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/72a202af7be940b6b836d78a33c48d06 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06 2024-11-27T13:26:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,038 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,038 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06, entries=150, sequenceid=77, filesize=11.7 K 2024-11-27T13:26:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,039 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for a5bdef3839c25f6b6634d128aa14c12e in 105ms, sequenceid=77, compaction requested=false 2024-11-27T13:26:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:06,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:06,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-27T13:26:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-27T13:26:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-27T13:26:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0250 sec 2024-11-27T13:26:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,042 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.0290 sec 2024-11-27T13:26:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,045 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,048 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,051 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,064 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,067 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,071 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical storefiletracker.StoreFileTrackerFactory(122) DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=32819), repeated from 2024-11-27T13:26:06,071 through 2024-11-27T13:26:06,123; duplicate entries elided ...]
2024-11-27T13:26:06,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-27T13:26:06,124 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-27T13:26:06,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees [... interleaved storefiletracker.StoreFileTrackerFactory(122) DEBUG duplicates (handlers 0-2, port=32819) elided ...]
2024-11-27T13:26:06,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-27T13:26:06,127 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:06,128 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-27T13:26:06,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] [... interleaved storefiletracker.StoreFileTrackerFactory(122) DEBUG duplicates (handlers 0-2, port=32819) elided ...]
[... identical storefiletracker.StoreFileTrackerFactory(122) DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=32819), repeated from 2024-11-27T13:26:06,129 through 2024-11-27T13:26:06,140; duplicate entries elided ...] 2024-11-27T13:26:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=32819) from 2024-11-27T13:26:06,181 through 2024-11-27T13:26:06,249]
2024-11-27T13:26:06,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-11-27T13:26:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-27T13:26:06,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:06,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-27T13:26:06,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec 2024-11-27T13:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, 
state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 158 msec 2024-11-27T13:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): 
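The pid=176/pid=175 entries just above trace a table flush: the master's FlushTableProcedure (pid=175) dispatches a per-region FlushRegionProcedure/FlushRegionCallable (pid=176) to the region server hosting TestAcidGuarantees, the region records its flush status journal, the result is reported back to the master, and both procedures finish with SUCCESS. A minimal sketch of the client-side call that triggers such a sequence is below; it assumes a standard HBase client on the classpath, and everything except the TestAcidGuarantees table name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master runs a
      // FlushTableProcedure and fans out per-region flush work, which is the
      // sequence that ends in the "Finished pid=175 ... FlushTableProcedure ...
      // SUCCESS" entry logged above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}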
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T13:26:06,313 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/f399f79cbb1e496c85c63d639d2e9557 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/f399f79cbb1e496c85c63d639d2e9557
2024-11-27T13:26:06,318 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into f399f79cbb1e496c85c63d639d2e9557(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T13:26:06,319 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:06,319 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713965868; duration=0sec
2024-11-27T13:26:06,319 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T13:26:06,319 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B
2024-11-27T13:26:06,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-27T13:26:06,430 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-27T13:26:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:06,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:06,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:06,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-27T13:26:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,433 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,433 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:06,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275b96d3504e9b4df39df473a49d2ea6a5_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,442 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,453 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742421_1597 (size=24358) 2024-11-27T13:26:06,477 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:06,482 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275b96d3504e9b4df39df473a49d2ea6a5_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275b96d3504e9b4df39df473a49d2ea6a5_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:06,483 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e1d007dae82349f8ba1dc9cd0214a3ae, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:06,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e1d007dae82349f8ba1dc9cd0214a3ae is 175, key is test_row_0/A:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:06,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714026479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742420_1596 (size=73994) 2024-11-27T13:26:06,486 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e1d007dae82349f8ba1dc9cd0214a3ae 2024-11-27T13:26:06,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714026484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714026484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/04ffc17355544e7c8fc54cb72abf4e83 is 50, key is test_row_0/B:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:06,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742422_1598 (size=12001) 2024-11-27T13:26:06,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:06,585 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:06,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-27T13:26:06,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:06,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:06,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:06,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714026586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
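The exchange above is the master/regionserver side of a table flush: the master stores FlushTableProcedure pid=177, dispatches FlushRegionCallable pid=178 to a0541979a851,32819, and the callable fails with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 had already started flushing a5bdef3839c25f6b6634d128aa14c12e at 13:26:06,430; the master keeps polling "Checking to see if procedure is done pid=177" while the dispatcher retries. For orientation only, a minimal sketch of the client-side call that produces this procedure traffic follows; the table name is taken from the log, while the class name and setup are illustrative and not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush-table procedure on the master and blocks until it
      // reports completion (the "Checking to see if procedure is done pid=..."
      // polling and the HBaseAdmin$TableFuture "Operation: FLUSH ... completed"
      // entries seen in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}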
2024-11-27T13:26:06,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714026589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714026589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:06,738 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:06,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-27T13:26:06,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:06,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
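The repeated WARN/DEBUG pairs from regionserver.HRegion(5069) and ipc.CallRunner(138) show the server rejecting Mutate RPCs with RegionTooBusyException while the region's memstore is above its blocking limit (512.0 K here); writers are expected to back off and retry until the in-flight flush drains the memstore. In this test the standard HBase client retry policy handles that internally. Purely as an illustration of what the rejected callIds correspond to, here is a hand-rolled retry sketch; the helper class and its names are hypothetical, and whether the exception arrives directly or wrapped depends on the client's retry settings.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Hypothetical helper, not part of the test: retries a put while the region
// reports RegionTooBusyException ("Over memstore limit"), backing off between
// attempts so the in-flight flush has time to drain the memstore.
final class BusyRegionRetry {

  static boolean causedByBusyRegion(IOException e) {
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (t instanceof RegionTooBusyException) {
        return true;  // may be thrown directly or wrapped, depending on retry config
      }
    }
    return false;
  }

  static void putWithRetry(Connection conn, String table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    try (Table t = conn.getTable(TableName.valueOf(table))) {
      for (int attempt = 1; ; attempt++) {
        try {
          t.put(put);       // rejected server-side while the memstore is blocked
          return;
        } catch (IOException e) {
          if (!causedByBusyRegion(e) || attempt >= maxAttempts) {
            throw e;
          }
          Thread.sleep(100L * attempt);  // crude linear backoff
        }
      }
    }
  }
}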
2024-11-27T13:26:06,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:06,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:06,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714026788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714026792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:06,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714026793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:06,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:06,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-27T13:26:06,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:06,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
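The pid=177/178 entries above record the master's FlushTableProcedure dispatching a FlushRegionCallable that keeps failing with "Unable to complete flush" while the region is already flushing, then being retried until the in-flight flush drains. A minimal client-side sketch of issuing the kind of table flush that drives this machinery is shown below; the Configuration setup is assumed (standard client settings), and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Standard client configuration; ZooKeeper quorum and other site settings are assumed,
    // not taken from the log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin.flush asks the master to flush every region of the table. In this build the
      // request appears as the FlushTableProcedure/FlushRegionProcedure pair seen above;
      // if a region is already flushing, the region-side callable fails and is re-dispatched,
      // which is what the repeated "Unable to complete flush" entries record.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}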
2024-11-27T13:26:06,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/04ffc17355544e7c8fc54cb72abf4e83 2024-11-27T13:26:06,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/cc771dd61337416d9ac168eab90383f4 is 50, key is test_row_0/C:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:06,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742423_1599 (size=12001) 2024-11-27T13:26:06,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/cc771dd61337416d9ac168eab90383f4 2024-11-27T13:26:06,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/e1d007dae82349f8ba1dc9cd0214a3ae as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae 2024-11-27T13:26:06,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae, entries=400, sequenceid=92, filesize=72.3 K 2024-11-27T13:26:06,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/04ffc17355544e7c8fc54cb72abf4e83 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83 2024-11-27T13:26:06,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83, entries=150, sequenceid=92, filesize=11.7 K 2024-11-27T13:26:06,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/cc771dd61337416d9ac168eab90383f4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4 2024-11-27T13:26:06,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4, entries=150, sequenceid=92, filesize=11.7 K 2024-11-27T13:26:06,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for a5bdef3839c25f6b6634d128aa14c12e in 540ms, sequenceid=92, compaction requested=true 2024-11-27T13:26:06,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:06,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:06,971 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:06,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:06,971 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:06,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:06,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:06,972 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136007 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:06,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:06,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:06,972 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:06,972 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:06,972 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=132.8 K 2024-11-27T13:26:06,972 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:06,972 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae] 2024-11-27T13:26:06,973 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:06,973 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bea466d98d3440a0832df43701bbb117, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078 2024-11-27T13:26:06,973 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:06,973 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:06,973 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/f399f79cbb1e496c85c63d639d2e9557, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.3 K 2024-11-27T13:26:06,973 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2ff89c33652428792c90c6aca736085, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713964255 2024-11-27T13:26:06,974 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting f399f79cbb1e496c85c63d639d2e9557, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078 2024-11-27T13:26:06,974 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1d007dae82349f8ba1dc9cd0214a3ae, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966389 2024-11-27T13:26:06,974 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c3557fd170ed4c66a273d60955c0eb8f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713964255 2024-11-27T13:26:06,975 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 04ffc17355544e7c8fc54cb72abf4e83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966418 2024-11-27T13:26:06,986 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#513 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:06,986 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/8524323c66584aac90a739f51904ebbb is 50, key is test_row_0/B:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:06,992 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:06,995 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411274739948a016647e093fbebaffc6168b5_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:06,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742424_1600 (size=12207) 2024-11-27T13:26:06,998 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411274739948a016647e093fbebaffc6168b5_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:06,998 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274739948a016647e093fbebaffc6168b5_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:07,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742425_1601 (size=4469) 2024-11-27T13:26:07,003 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#514 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:07,004 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/8f52fc1a08db47c4a623c6727859efba is 175, key is test_row_0/A:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:07,008 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/8524323c66584aac90a739f51904ebbb as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8524323c66584aac90a739f51904ebbb 2024-11-27T13:26:07,012 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 8524323c66584aac90a739f51904ebbb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:07,012 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:07,012 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713966971; duration=0sec 2024-11-27T13:26:07,012 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:07,012 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:07,012 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:07,013 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:07,013 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:07,013 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:07,013 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.3 K 2024-11-27T13:26:07,014 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 60c5bd483ccf4f11a4e7a4f49d3c0bfe, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732713962078 2024-11-27T13:26:07,014 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 72a202af7be940b6b836d78a33c48d06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732713964255 2024-11-27T13:26:07,014 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting cc771dd61337416d9ac168eab90383f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966418 2024-11-27T13:26:07,030 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#515 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:07,031 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/814ecbdd831245619023713430f36003 is 50, key is test_row_0/C:col10/1732713966427/Put/seqid=0 2024-11-27T13:26:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742426_1602 (size=31161) 2024-11-27T13:26:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:07,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742427_1603 (size=12207) 2024-11-27T13:26:07,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:07,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-27T13:26:07,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:07,044 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:07,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,051 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/814ecbdd831245619023713430f36003 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/814ecbdd831245619023713430f36003 2024-11-27T13:26:07,057 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into 814ecbdd831245619023713430f36003(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
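The compaction entries above show ExploringCompactionPolicy selecting all three eligible store files per family and PressureAwareThroughputController capping the rewrite at 50.00 MB/second. A minimal sketch of the configuration knobs behind that selection and throttling follows; the values are common defaults and are assumptions, not values read from this cluster's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for a minor compaction; the assumed
    // minimum of 3 matches "Selecting compaction from 3 store files" in the log above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used by ExploringCompactionPolicy when deciding whether a candidate file is "in ratio".
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Lower bound of the pressure-aware compaction throughput controller; 50 MB/s is assumed
    // here as the source of the "total limit is 50.00 MB/second" figure in the log.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}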
2024-11-27T13:26:07,057 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:07,057 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713966972; duration=0sec 2024-11-27T13:26:07,057 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:07,057 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:07,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c07b4f4fed644dee9cb2d3d6a25772fd_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713966450/Put/seqid=0 2024-11-27T13:26:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742428_1604 (size=12154) 2024-11-27T13:26:07,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:07,087 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127c07b4f4fed644dee9cb2d3d6a25772fd_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c07b4f4fed644dee9cb2d3d6a25772fd_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:07,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5ac8690c80dd47b480b12d2d1c6a5f33, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:07,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5ac8690c80dd47b480b12d2d1c6a5f33 is 175, key is test_row_0/A:col10/1732713966450/Put/seqid=0 2024-11-27T13:26:07,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
as already flushing 2024-11-27T13:26:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:07,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742429_1605 (size=30955) 2024-11-27T13:26:07,101 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5ac8690c80dd47b480b12d2d1c6a5f33 2024-11-27T13:26:07,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/625eadfe4b514c678298c526af436bac is 50, key is test_row_0/B:col10/1732713966450/Put/seqid=0 2024-11-27T13:26:07,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714027105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714027108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714027109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742430_1606 (size=12001) 2024-11-27T13:26:07,135 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/625eadfe4b514c678298c526af436bac 2024-11-27T13:26:07,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/3b1443c4e8c4431d9618c9e5db033fc9 is 50, key is test_row_0/C:col10/1732713966450/Put/seqid=0 2024-11-27T13:26:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742431_1607 (size=12001) 2024-11-27T13:26:07,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714027212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714027212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714027212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714027415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714027415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714027415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,439 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/8f52fc1a08db47c4a623c6727859efba as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba 2024-11-27T13:26:07,442 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into 8f52fc1a08db47c4a623c6727859efba(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
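The recurring RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking size, which is the per-region flush threshold multiplied by a block multiplier. A minimal sketch of those two settings follows; the 128 KB threshold is an assumption chosen only because, with the default multiplier of 4, it reproduces the 512.0 K limit reported in the log (the shipped default flush size is 128 MB).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold (assumed 128 KB for illustration; default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints 524288 bytes, i.e. the 512.0 K limit seen in the "Over memstore limit" entries.
    System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
  }
}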
2024-11-27T13:26:07,442 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:07,442 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713966971; duration=0sec 2024-11-27T13:26:07,443 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:07,443 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:07,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:07,574 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/3b1443c4e8c4431d9618c9e5db033fc9 2024-11-27T13:26:07,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5ac8690c80dd47b480b12d2d1c6a5f33 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33 2024-11-27T13:26:07,582 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33, entries=150, sequenceid=119, filesize=30.2 K 2024-11-27T13:26:07,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/625eadfe4b514c678298c526af436bac as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac 2024-11-27T13:26:07,586 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac, entries=150, sequenceid=119, filesize=11.7 K 2024-11-27T13:26:07,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/3b1443c4e8c4431d9618c9e5db033fc9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9 2024-11-27T13:26:07,591 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9, entries=150, sequenceid=119, filesize=11.7 K 2024-11-27T13:26:07,592 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a5bdef3839c25f6b6634d128aa14c12e in 548ms, sequenceid=119, compaction requested=false 2024-11-27T13:26:07,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:07,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:07,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-27T13:26:07,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-27T13:26:07,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-27T13:26:07,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1590 sec 2024-11-27T13:26:07,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.1650 sec 2024-11-27T13:26:07,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:07,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-27T13:26:07,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:07,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:07,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:07,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:07,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127dd564ecedaa74a20999d04a293e157ab_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:07,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742432_1608 (size=14744) 2024-11-27T13:26:07,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714027747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714027750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714027750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714027851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714027853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:07,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:07,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714027855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714028056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714028057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714028057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,137 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:08,140 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127dd564ecedaa74a20999d04a293e157ab_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dd564ecedaa74a20999d04a293e157ab_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:08,141 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ab95cc41578e45928a1c9d2d184f0acd, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:08,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ab95cc41578e45928a1c9d2d184f0acd is 175, key is test_row_0/A:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:08,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742433_1609 (size=39699) 2024-11-27T13:26:08,146 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ab95cc41578e45928a1c9d2d184f0acd 2024-11-27T13:26:08,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/1e6a21f3d2e1435fb8855e887cd36ef4 is 50, key is test_row_0/B:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:08,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742434_1610 (size=12101) 2024-11-27T13:26:08,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714028359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714028360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714028361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-27T13:26:08,537 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-27T13:26:08,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:08,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-27T13:26:08,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:08,540 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:08,540 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:08,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:08,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/1e6a21f3d2e1435fb8855e887cd36ef4 2024-11-27T13:26:08,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/105e02d0200a4faaaf93a05b58577995 is 50, key is test_row_0/C:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:08,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742435_1611 (size=12101) 
2024-11-27T13:26:08,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:08,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:08,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-27T13:26:08,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:08,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:08,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:08,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:08,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:08,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:08,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:08,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:08,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-27T13:26:08,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:08,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:08,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:08,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:08,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:08,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714028863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714028866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714028869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:08,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/105e02d0200a4faaaf93a05b58577995 2024-11-27T13:26:08,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ab95cc41578e45928a1c9d2d184f0acd as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd 2024-11-27T13:26:08,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd, entries=200, sequenceid=133, filesize=38.8 K 2024-11-27T13:26:08,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/1e6a21f3d2e1435fb8855e887cd36ef4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4 2024-11-27T13:26:08,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4, entries=150, sequenceid=133, filesize=11.8 K 2024-11-27T13:26:08,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/105e02d0200a4faaaf93a05b58577995 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995 2024-11-27T13:26:08,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995, entries=150, sequenceid=133, filesize=11.8 K 2024-11-27T13:26:08,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a5bdef3839c25f6b6634d128aa14c12e in 1268ms, sequenceid=133, compaction requested=true 2024-11-27T13:26:08,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:08,988 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:08,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:08,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:08,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:08,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:08,989 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:08,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:08,989 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=99.4 K 2024-11-27T13:26:08,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:08,989 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd] 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:08,989 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:08,990 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:08,990 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8524323c66584aac90a739f51904ebbb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.5 K 2024-11-27T13:26:08,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f52fc1a08db47c4a623c6727859efba, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966418 2024-11-27T13:26:08,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ac8690c80dd47b480b12d2d1c6a5f33, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732713966450 2024-11-27T13:26:08,990 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8524323c66584aac90a739f51904ebbb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966418 2024-11-27T13:26:08,990 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab95cc41578e45928a1c9d2d184f0acd, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:08,990 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 625eadfe4b514c678298c526af436bac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732713966450 2024-11-27T13:26:08,991 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e6a21f3d2e1435fb8855e887cd36ef4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:08,996 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:08,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:08,998 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127a5fd67f1f0d4469c848c024ddef73ef3_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:08,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-27T13:26:08,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 
{event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:08,998 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-27T13:26:08,998 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#523 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:08,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:08,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:08,999 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/c6a12cb23d35426d88f947bf7edc4666 is 50, key is test_row_0/B:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:09,000 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127a5fd67f1f0d4469c848c024ddef73ef3_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:09,001 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a5fd67f1f0d4469c848c024ddef73ef3_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:09,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742436_1612 (size=12409) 2024-11-27T13:26:09,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742437_1613 
(size=4469) 2024-11-27T13:26:09,007 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#522 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:09,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275fcdf9bcad2b44ff834a88c71557cb7a_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713967748/Put/seqid=0 2024-11-27T13:26:09,008 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/baac7da93b5c4a92aa49e454001ebbfe is 175, key is test_row_0/A:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:09,011 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/c6a12cb23d35426d88f947bf7edc4666 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c6a12cb23d35426d88f947bf7edc4666 2024-11-27T13:26:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742438_1614 (size=31363) 2024-11-27T13:26:09,016 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into c6a12cb23d35426d88f947bf7edc4666(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
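[editor's note] The PressureAwareThroughputController lines ("average throughput is ... total limit is 50.00 MB/second") reflect the compaction throughput limiter. A hedged sketch of its bounds is shown below; the key names are the standard HBase ones, but the byte values are assumptions chosen to match the 50 MB/s limit printed above, not settings taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds (bytes/sec) between which the controller scales throughput with memstore pressure.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction throughput bounds set (sketch only)");
      }
    }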
2024-11-27T13:26:09,016 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:09,016 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713968988; duration=0sec 2024-11-27T13:26:09,016 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:09,016 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:09,016 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:09,017 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:09,017 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:09,017 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:09,017 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/814ecbdd831245619023713430f36003, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.5 K 2024-11-27T13:26:09,018 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 814ecbdd831245619023713430f36003, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732713966418 2024-11-27T13:26:09,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742439_1615 (size=12304) 2024-11-27T13:26:09,018 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b1443c4e8c4431d9618c9e5db033fc9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732713966450 2024-11-27T13:26:09,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:09,019 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 105e02d0200a4faaaf93a05b58577995, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:09,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411275fcdf9bcad2b44ff834a88c71557cb7a_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275fcdf9bcad2b44ff834a88c71557cb7a_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:09,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ce10c9c2c4d849d087c0541eded064df, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:09,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ce10c9c2c4d849d087c0541eded064df is 175, key is test_row_0/A:col10/1732713967748/Put/seqid=0 2024-11-27T13:26:09,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742440_1616 (size=31105) 2024-11-27T13:26:09,031 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:09,032 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/dfaddec3ac2b4983935d49d37ed235ff is 50, key is test_row_0/C:col10/1732713967100/Put/seqid=0 2024-11-27T13:26:09,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742441_1617 (size=12409) 2024-11-27T13:26:09,042 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/dfaddec3ac2b4983935d49d37ed235ff as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/dfaddec3ac2b4983935d49d37ed235ff 2024-11-27T13:26:09,046 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into dfaddec3ac2b4983935d49d37ed235ff(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:09,046 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:09,046 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713968989; duration=0sec 2024-11-27T13:26:09,046 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:09,047 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:09,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:09,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:09,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714029136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:09,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714029147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,149 DEBUG [Thread-2541 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8188 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:09,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714029239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,420 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/baac7da93b5c4a92aa49e454001ebbfe as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe 2024-11-27T13:26:09,426 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into baac7da93b5c4a92aa49e454001ebbfe(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
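[editor's note] The client-side trace above (Thread-2541, RpcRetryingCallerImpl "tries=7, retries=16") shows that RegionTooBusyException is treated as retriable: HTable.put keeps retrying with backoff until the retry budget is exhausted. The sketch below is assumed code, not part of AcidGuaranteesTestTool; only the table name, row key, and column family/qualifier are taken from the log, and the pause value is an arbitrary example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetriedPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" in the trace above
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed value)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // retried internally while the region keeps reporting RegionTooBusyException
        }
      }
    }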
2024-11-27T13:26:09,426 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:09,426 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713968988; duration=0sec 2024-11-27T13:26:09,426 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:09,426 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:09,430 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ce10c9c2c4d849d087c0541eded064df 2024-11-27T13:26:09,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9cf7a30f63bf41dea6971da47f1a6e5e is 50, key is test_row_0/B:col10/1732713967748/Put/seqid=0 2024-11-27T13:26:09,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742442_1618 (size=12151) 2024-11-27T13:26:09,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714029442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:09,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714029745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,841 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9cf7a30f63bf41dea6971da47f1a6e5e 2024-11-27T13:26:09,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fcf186c5903642a98862acc931445b57 is 50, key is test_row_0/C:col10/1732713967748/Put/seqid=0 2024-11-27T13:26:09,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742443_1619 (size=12151) 2024-11-27T13:26:09,852 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fcf186c5903642a98862acc931445b57 2024-11-27T13:26:09,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/ce10c9c2c4d849d087c0541eded064df as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df 2024-11-27T13:26:09,858 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df, entries=150, sequenceid=158, filesize=30.4 K 2024-11-27T13:26:09,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9cf7a30f63bf41dea6971da47f1a6e5e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e 2024-11-27T13:26:09,863 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e, entries=150, sequenceid=158, filesize=11.9 K 2024-11-27T13:26:09,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fcf186c5903642a98862acc931445b57 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57 2024-11-27T13:26:09,869 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57, entries=150, sequenceid=158, filesize=11.9 K 2024-11-27T13:26:09,870 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a5bdef3839c25f6b6634d128aa14c12e in 872ms, sequenceid=158, compaction requested=false 2024-11-27T13:26:09,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:09,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
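[editor's note] The recurring "Over memstore limit=512.0 K" RegionTooBusyException is thrown by HRegion.checkResources when a region's memstore grows past flush.size multiplied by the block multiplier. 512 K would be consistent with a 128 K flush size and the default multiplier of 4, which is plausibly what this test configures, but the values in the sketch below are assumptions for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush trigger (bytes)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // writes block at flush.size * multiplier
        long blockAt = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes would block at ~" + (blockAt / 1024) + " K (sketch only)");
      }
    }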
2024-11-27T13:26:09,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-27T13:26:09,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-27T13:26:09,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-27T13:26:09,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3300 sec 2024-11-27T13:26:09,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 1.3360 sec 2024-11-27T13:26:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:09,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:09,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:09,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127bc88bea5309d4b93843865a5de601f05_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:09,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742444_1620 (size=14794) 2024-11-27T13:26:09,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714029902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714029905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:09,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714029905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714030006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714030009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714030010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714030210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714030213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714030214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714030247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,290 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:10,293 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127bc88bea5309d4b93843865a5de601f05_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bc88bea5309d4b93843865a5de601f05_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:10,294 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/3c1cb5762d6f48f49927713d59235fba, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:10,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/3c1cb5762d6f48f49927713d59235fba is 175, key is test_row_0/A:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:10,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742445_1621 (size=39749) 2024-11-27T13:26:10,299 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, 
memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/3c1cb5762d6f48f49927713d59235fba 2024-11-27T13:26:10,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4b924c4d65d14ca1b09e167212a29621 is 50, key is test_row_0/B:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:10,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742446_1622 (size=12151) 2024-11-27T13:26:10,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714030513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714030516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714030517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:10,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-27T13:26:10,643 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-27T13:26:10,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:10,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-11-27T13:26:10,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:10,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:10,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:10,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:10,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4b924c4d65d14ca1b09e167212a29621 2024-11-27T13:26:10,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/46392fd8b16d465b89d99388a545bbe4 is 50, key is test_row_0/C:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:10,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742447_1623 (size=12151) 
2024-11-27T13:26:10,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:10,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:10,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:10,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:10,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:10,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:10,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:10,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:10,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:10,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:10,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:10,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:10,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:10,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:10,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:10,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:10,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:10,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714031017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714031018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714031021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:11,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/46392fd8b16d465b89d99388a545bbe4 2024-11-27T13:26:11,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/3c1cb5762d6f48f49927713d59235fba as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba 2024-11-27T13:26:11,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba, entries=200, sequenceid=174, filesize=38.8 K 2024-11-27T13:26:11,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4b924c4d65d14ca1b09e167212a29621 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621 2024-11-27T13:26:11,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621, entries=150, sequenceid=174, filesize=11.9 K 2024-11-27T13:26:11,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/46392fd8b16d465b89d99388a545bbe4 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4 2024-11-27T13:26:11,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4, entries=150, sequenceid=174, filesize=11.9 K 2024-11-27T13:26:11,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a5bdef3839c25f6b6634d128aa14c12e in 1263ms, sequenceid=174, compaction requested=true 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,140 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:11,140 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,141 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,141 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,141 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:11,141 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:11,141 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,141 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:11,141 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=99.8 K 2024-11-27T13:26:11,141 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c6a12cb23d35426d88f947bf7edc4666, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.9 K 2024-11-27T13:26:11,141 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,141 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba] 2024-11-27T13:26:11,142 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting baac7da93b5c4a92aa49e454001ebbfe, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:11,142 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting c6a12cb23d35426d88f947bf7edc4666, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:11,142 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cf7a30f63bf41dea6971da47f1a6e5e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732713967745 2024-11-27T13:26:11,142 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce10c9c2c4d849d087c0541eded064df, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732713967745 2024-11-27T13:26:11,142 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b924c4d65d14ca1b09e167212a29621, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,143 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c1cb5762d6f48f49927713d59235fba, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,151 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#531 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,152 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/b8c0734f812843509189e876fe647a89 is 50, key is test_row_0/B:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:11,155 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,157 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411270d2f9e8f91834e80ab2d79bb1a76c0ca_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,159 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411270d2f9e8f91834e80ab2d79bb1a76c0ca_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,159 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411270d2f9e8f91834e80ab2d79bb1a76c0ca_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742448_1624 (size=12561) 2024-11-27T13:26:11,179 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/b8c0734f812843509189e876fe647a89 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b8c0734f812843509189e876fe647a89 2024-11-27T13:26:11,183 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into b8c0734f812843509189e876fe647a89(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:11,183 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,183 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713971140; duration=0sec 2024-11-27T13:26:11,183 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:11,183 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:11,183 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,184 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,184 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:11,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742449_1625 (size=4469) 2024-11-27T13:26:11,184 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,184 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/dfaddec3ac2b4983935d49d37ed235ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=35.9 K 2024-11-27T13:26:11,188 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting dfaddec3ac2b4983935d49d37ed235ff, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732713967100 2024-11-27T13:26:11,189 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#532 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,189 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fcf186c5903642a98862acc931445b57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732713967745 2024-11-27T13:26:11,189 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/0ff1f282ca514737a025e74da87e040a is 175, key is test_row_0/A:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:11,190 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 46392fd8b16d465b89d99388a545bbe4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,197 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#533 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742450_1626 (size=31515) 2024-11-27T13:26:11,198 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/6705d6fe9b3d49178350fa5cc2dd0a85 is 50, key is test_row_0/C:col10/1732713969122/Put/seqid=0 2024-11-27T13:26:11,203 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/0ff1f282ca514737a025e74da87e040a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a 2024-11-27T13:26:11,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742451_1627 (size=12561) 2024-11-27T13:26:11,209 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into 0ff1f282ca514737a025e74da87e040a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
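Family A in this test is MOB-enabled, which is why the short-compactions thread above goes through mob.DefaultMobStoreCompactor and aborts an empty MOB writer when no MOB cells are present. As a hedged illustration (not from the log), a MOB-enabled family like this is typically declared at table-creation time roughly as in the sketch below; the threshold value is an arbitrary example, not taken from the test.

// Illustrative sketch only: declaring a MOB-enabled column family 'A' for a table
// like TestAcidGuarantees with the HBase 2.x client API. The 100 KB threshold is an
// arbitrary example value.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Values in 'A' larger than the threshold are written to separate MOB files,
      // which is what the mobdir/.tmp paths in the surrounding log entries refer to.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L)
          .build());
      admin.createTable(table.build());
    }
  }
}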
2024-11-27T13:26:11,209 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,209 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713971140; duration=0sec 2024-11-27T13:26:11,209 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,209 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:11,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:11,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:11,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,257 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:11,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
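At this point a flush is requested on a5bdef3839c25f6b6634d128aa14c12e while MemStoreFlusher.0 is already flushing it, so the procedure-driven FlushRegionCallable (pid=182) reports "NOT flushing ... as already flushing" and fails, which surfaces as the IOException in the entries that follow. As an illustrative aside (not part of the test output), an explicit flush of this kind can be requested through the Admin API; the sketch below is a minimal example under that assumption.

// Illustrative sketch only, not from the log: trigger an explicit flush of
// TestAcidGuarantees via the HBase 2.x Admin API. The master drives this as a
// procedure dispatched to the region server, broadly like pid=182 above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush all column families of the table to disk.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}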
2024-11-27T13:26:11,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112788aa4c7a401a4377a58ac06acb3ee510_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713969904/Put/seqid=0 2024-11-27T13:26:11,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742452_1628 (size=14794) 2024-11-27T13:26:11,264 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,267 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112788aa4c7a401a4377a58ac06acb3ee510_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112788aa4c7a401a4377a58ac06acb3ee510_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:11,268 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/1fa55ee469fa4d3dbd366832eee7f1c3, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/1fa55ee469fa4d3dbd366832eee7f1c3 is 175, key is test_row_0/A:col10/1732713969904/Put/seqid=0 2024-11-27T13:26:11,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742453_1629 (size=39749) 2024-11-27T13:26:11,277 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/1fa55ee469fa4d3dbd366832eee7f1c3 2024-11-27T13:26:11,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5cef3c37a70444d18e1b2a581965f160 is 50, key is test_row_0/B:col10/1732713969904/Put/seqid=0 2024-11-27T13:26:11,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714031286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742454_1630 (size=12151) 2024-11-27T13:26:11,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5cef3c37a70444d18e1b2a581965f160 2024-11-27T13:26:11,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/baf521ad65b04260babe4cf894418382 is 50, key is test_row_0/C:col10/1732713969904/Put/seqid=0 2024-11-27T13:26:11,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742455_1631 (size=12151) 2024-11-27T13:26:11,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/baf521ad65b04260babe4cf894418382 2024-11-27T13:26:11,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/1fa55ee469fa4d3dbd366832eee7f1c3 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3 2024-11-27T13:26:11,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3, entries=200, sequenceid=200, filesize=38.8 K 2024-11-27T13:26:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5cef3c37a70444d18e1b2a581965f160 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160 2024-11-27T13:26:11,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160, entries=150, sequenceid=200, filesize=11.9 K 2024-11-27T13:26:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/baf521ad65b04260babe4cf894418382 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382 2024-11-27T13:26:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382, entries=150, sequenceid=200, filesize=11.9 K 2024-11-27T13:26:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a5bdef3839c25f6b6634d128aa14c12e in 69ms, sequenceid=200, compaction requested=false 2024-11-27T13:26:11,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:11,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 
2024-11-27T13:26:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:11,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411271fb7011baf9b45b3835db106212d4b70_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742456_1632 (size=12304) 2024-11-27T13:26:11,403 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,406 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411271fb7011baf9b45b3835db106212d4b70_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271fb7011baf9b45b3835db106212d4b70_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,407 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b7f989c9bb474093975f046d50146797, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b7f989c9bb474093975f046d50146797 is 175, key is test_row_0/A:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:11,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742457_1633 (size=31105) 2024-11-27T13:26:11,422 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b7f989c9bb474093975f046d50146797 2024-11-27T13:26:11,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/2c2fc7c340b0471ea8456bad009d68c1 is 50, key is test_row_0/B:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742458_1634 (size=12151) 2024-11-27T13:26:11,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/2c2fc7c340b0471ea8456bad009d68c1 2024-11-27T13:26:11,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/83cb18d099514a9fbd09a9aeff90fad0 is 50, key is test_row_0/C:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742459_1635 (size=12151) 2024-11-27T13:26:11,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714031472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:11,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:11,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714031575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,610 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/6705d6fe9b3d49178350fa5cc2dd0a85 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6705d6fe9b3d49178350fa5cc2dd0a85 2024-11-27T13:26:11,614 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into 6705d6fe9b3d49178350fa5cc2dd0a85(size=12.3 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:11,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,614 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713971140; duration=0sec 2024-11-27T13:26:11,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,614 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:11,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:11,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714031779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:11,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/83cb18d099514a9fbd09a9aeff90fad0 2024-11-27T13:26:11,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:11,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:11,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
as already flushing 2024-11-27T13:26:11,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:11,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b7f989c9bb474093975f046d50146797 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797 2024-11-27T13:26:11,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797, entries=150, sequenceid=213, filesize=30.4 K 2024-11-27T13:26:11,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/2c2fc7c340b0471ea8456bad009d68c1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1 2024-11-27T13:26:11,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T13:26:11,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/83cb18d099514a9fbd09a9aeff90fad0 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0 2024-11-27T13:26:11,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0, entries=150, sequenceid=213, filesize=11.9 K 2024-11-27T13:26:11,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a5bdef3839c25f6b6634d128aa14c12e in 494ms, sequenceid=213, compaction requested=true 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:11,885 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:11,885 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,886 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,886 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,886 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:11,886 DEBUG 
[RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:11,886 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,886 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,886 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b8c0734f812843509189e876fe647a89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.0 K 2024-11-27T13:26:11,886 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=100.0 K 2024-11-27T13:26:11,887 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:11,887 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797] 2024-11-27T13:26:11,887 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8c0734f812843509189e876fe647a89, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,887 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ff1f282ca514737a025e74da87e040a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,888 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cef3c37a70444d18e1b2a581965f160, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713969903 2024-11-27T13:26:11,888 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fa55ee469fa4d3dbd366832eee7f1c3, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713969900 2024-11-27T13:26:11,888 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c2fc7c340b0471ea8456bad009d68c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:11,888 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b7f989c9bb474093975f046d50146797, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:11,897 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#540 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,897 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/00ea2f8dd9ac48d5ad785f860e83dc37 is 50, key is test_row_0/B:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,900 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,903 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112718f6904ee2024059b234a0e614dde57c_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,905 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112718f6904ee2024059b234a0e614dde57c_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,905 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112718f6904ee2024059b234a0e614dde57c_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:11,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742460_1636 (size=12663) 2024-11-27T13:26:11,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742461_1637 (size=4469) 2024-11-27T13:26:11,923 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/00ea2f8dd9ac48d5ad785f860e83dc37 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/00ea2f8dd9ac48d5ad785f860e83dc37 2024-11-27T13:26:11,926 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#541 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,927 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/813f53d4edf64a4a8f49c8ffe361388d is 175, key is test_row_0/A:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,928 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 00ea2f8dd9ac48d5ad785f860e83dc37(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:11,929 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,929 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713971885; duration=0sec 2024-11-27T13:26:11,929 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:11,929 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:11,929 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:11,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:11,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:11,930 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:11,930 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6705d6fe9b3d49178350fa5cc2dd0a85, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.0 K 2024-11-27T13:26:11,930 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6705d6fe9b3d49178350fa5cc2dd0a85, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732713969122 2024-11-27T13:26:11,931 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting baf521ad65b04260babe4cf894418382, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732713969903 2024-11-27T13:26:11,931 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83cb18d099514a9fbd09a9aeff90fad0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:11,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742462_1638 (size=31617) 2024-11-27T13:26:11,936 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/813f53d4edf64a4a8f49c8ffe361388d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d 2024-11-27T13:26:11,940 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#542 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:11,940 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into 813f53d4edf64a4a8f49c8ffe361388d(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:11,940 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:11,940 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713971885; duration=0sec 2024-11-27T13:26:11,940 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:11,940 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:11,940 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/2368ce67db3c4da7bc64d3494896a29a is 50, key is test_row_0/C:col10/1732713971391/Put/seqid=0 2024-11-27T13:26:11,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742463_1639 (size=12663) 2024-11-27T13:26:12,021 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:12,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:12,022 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:12,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:12,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:12,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274d3d61c601264ce4944e299f09271137_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713971468/Put/seqid=0 2024-11-27T13:26:12,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714032032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714032035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714032036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742464_1640 (size=12304) 2024-11-27T13:26:12,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:12,064 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411274d3d61c601264ce4944e299f09271137_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274d3d61c601264ce4944e299f09271137_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:12,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/dcfa1bdfa03a4fec887407b12cc37baa, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:12,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/dcfa1bdfa03a4fec887407b12cc37baa is 175, key is test_row_0/A:col10/1732713971468/Put/seqid=0 2024-11-27T13:26:12,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742465_1641 (size=31105) 2024-11-27T13:26:12,070 INFO 
[RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/dcfa1bdfa03a4fec887407b12cc37baa 2024-11-27T13:26:12,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/3877a1bbe86146b58f588e03ea5323f9 is 50, key is test_row_0/B:col10/1732713971468/Put/seqid=0 2024-11-27T13:26:12,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742466_1642 (size=12151) 2024-11-27T13:26:12,079 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/3877a1bbe86146b58f588e03ea5323f9 2024-11-27T13:26:12,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714032083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/63b676d41900443dbe46d867055f3f89 is 50, key is test_row_0/C:col10/1732713971468/Put/seqid=0 2024-11-27T13:26:12,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742467_1643 (size=12151) 2024-11-27T13:26:12,090 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/63b676d41900443dbe46d867055f3f89 2024-11-27T13:26:12,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/dcfa1bdfa03a4fec887407b12cc37baa as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa 2024-11-27T13:26:12,097 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa, entries=150, sequenceid=239, filesize=30.4 K 2024-11-27T13:26:12,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/3877a1bbe86146b58f588e03ea5323f9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9 2024-11-27T13:26:12,101 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 
{event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T13:26:12,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/63b676d41900443dbe46d867055f3f89 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89 2024-11-27T13:26:12,105 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89, entries=150, sequenceid=239, filesize=11.9 K 2024-11-27T13:26:12,106 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a5bdef3839c25f6b6634d128aa14c12e in 84ms, sequenceid=239, compaction requested=false 2024-11-27T13:26:12,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:12,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:12,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-27T13:26:12,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-11-27T13:26:12,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-27T13:26:12,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4600 sec 2024-11-27T13:26:12,110 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 1.4650 sec 2024-11-27T13:26:12,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:12,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:12,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:12,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279ed019e97534471bbea3efa8712d8602_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:12,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742468_1644 (size=12304) 2024-11-27T13:26:12,149 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:12,152 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279ed019e97534471bbea3efa8712d8602_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279ed019e97534471bbea3efa8712d8602_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:12,153 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/c2f366e57ce8443694a35726fb6a0e20, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:12,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/c2f366e57ce8443694a35726fb6a0e20 is 175, key is test_row_0/A:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:12,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742469_1645 (size=31105) 2024-11-27T13:26:12,159 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/c2f366e57ce8443694a35726fb6a0e20 2024-11-27T13:26:12,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/0a43c787500c43b29ba691fdce6da25b is 50, key is test_row_0/B:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:12,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714032170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714032170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714032171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742470_1646 (size=12151) 2024-11-27T13:26:12,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714032274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714032275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714032275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,350 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/2368ce67db3c4da7bc64d3494896a29a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/2368ce67db3c4da7bc64d3494896a29a 2024-11-27T13:26:12,353 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into 2368ce67db3c4da7bc64d3494896a29a(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:12,353 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:12,353 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713971885; duration=0sec 2024-11-27T13:26:12,353 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:12,353 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:12,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714032477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714032477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714032477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/0a43c787500c43b29ba691fdce6da25b 2024-11-27T13:26:12,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714032588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/5f98a87690384d489469f5e11521abf8 is 50, key is test_row_0/C:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:12,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742471_1647 (size=12151) 2024-11-27T13:26:12,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-27T13:26:12,750 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-27T13:26:12,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:12,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-11-27T13:26:12,753 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:12,753 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=183, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:12,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:12,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714032780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714032781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714032781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:12,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:12,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-27T13:26:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:12,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:12,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:12,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:12,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:13,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/5f98a87690384d489469f5e11521abf8 2024-11-27T13:26:13,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/c2f366e57ce8443694a35726fb6a0e20 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20 2024-11-27T13:26:13,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20, entries=150, sequenceid=253, filesize=30.4 K 2024-11-27T13:26:13,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/0a43c787500c43b29ba691fdce6da25b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b 2024-11-27T13:26:13,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T13:26:13,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/5f98a87690384d489469f5e11521abf8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8 2024-11-27T13:26:13,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8, entries=150, sequenceid=253, filesize=11.9 K 2024-11-27T13:26:13,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for a5bdef3839c25f6b6634d128aa14c12e in 881ms, sequenceid=253, compaction requested=true 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:13,019 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:13,019 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93827 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:13,020 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:13,020 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:13,020 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/00ea2f8dd9ac48d5ad785f860e83dc37, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.1 K 2024-11-27T13:26:13,020 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=91.6 K 2024-11-27T13:26:13,020 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20] 2024-11-27T13:26:13,020 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 00ea2f8dd9ac48d5ad785f860e83dc37, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:13,021 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 813f53d4edf64a4a8f49c8ffe361388d, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:13,021 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcfa1bdfa03a4fec887407b12cc37baa, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732713971434 2024-11-27T13:26:13,021 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3877a1bbe86146b58f588e03ea5323f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732713971434 2024-11-27T13:26:13,021 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2f366e57ce8443694a35726fb6a0e20, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:13,021 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a43c787500c43b29ba691fdce6da25b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:13,028 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:13,033 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#550 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:13,033 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/44854d6de6194bd38bfe4263c72c3c25 is 50, key is test_row_0/B:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:13,040 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127a9cb16a4e03246f894ad2f639b2820e5_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:13,041 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127a9cb16a4e03246f894ad2f639b2820e5_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:13,042 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127a9cb16a4e03246f894ad2f639b2820e5_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:13,057 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:13,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:13,058 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:13,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:13,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742472_1648 (size=12765) 2024-11-27T13:26:13,082 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/44854d6de6194bd38bfe4263c72c3c25 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/44854d6de6194bd38bfe4263c72c3c25 2024-11-27T13:26:13,086 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 44854d6de6194bd38bfe4263c72c3c25(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:13,086 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:13,086 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713973019; duration=0sec 2024-11-27T13:26:13,086 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:13,086 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:13,086 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:13,087 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:13,088 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:13,088 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:13,088 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/2368ce67db3c4da7bc64d3494896a29a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.1 K 2024-11-27T13:26:13,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2368ce67db3c4da7bc64d3494896a29a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732713971277 2024-11-27T13:26:13,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 63b676d41900443dbe46d867055f3f89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732713971434 2024-11-27T13:26:13,089 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f98a87690384d489469f5e11521abf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:13,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 
is added to blk_1073742473_1649 (size=4469) 2024-11-27T13:26:13,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272d48e3413c18486a8e583bb0db7c79b3_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713972168/Put/seqid=0 2024-11-27T13:26:13,101 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#552 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:13,102 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/d13f6b97c31b4661b6e0d087ee9006e1 is 50, key is test_row_0/C:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:13,102 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#549 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:13,103 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/f4b235fccb154ac7a63dcd1b4e078652 is 175, key is test_row_0/A:col10/1732713972034/Put/seqid=0 2024-11-27T13:26:13,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742474_1650 (size=12454) 2024-11-27T13:26:13,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742476_1652 (size=31719) 2024-11-27T13:26:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742475_1651 (size=12765) 2024-11-27T13:26:13,111 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272d48e3413c18486a8e583bb0db7c79b3_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272d48e3413c18486a8e583bb0db7c79b3_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:13,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5dca65f7874242299ef882d82145a863, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:13,112 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/d13f6b97c31b4661b6e0d087ee9006e1 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/d13f6b97c31b4661b6e0d087ee9006e1 2024-11-27T13:26:13,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5dca65f7874242299ef882d82145a863 is 175, key is test_row_0/A:col10/1732713972168/Put/seqid=0 2024-11-27T13:26:13,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742477_1653 (size=31255) 2024-11-27T13:26:13,117 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into d13f6b97c31b4661b6e0d087ee9006e1(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:13,117 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:13,117 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713973019; duration=0sec 2024-11-27T13:26:13,117 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:13,117 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:13,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:13,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714033296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714033296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714033296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:13,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714033400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714033400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714033400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,512 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/f4b235fccb154ac7a63dcd1b4e078652 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652 2024-11-27T13:26:13,516 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5dca65f7874242299ef882d82145a863 2024-11-27T13:26:13,516 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into f4b235fccb154ac7a63dcd1b4e078652(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:13,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:13,516 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713973019; duration=0sec 2024-11-27T13:26:13,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:13,516 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:13,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/a7e06946d7ac4e8a9903498b463c5e51 is 50, key is test_row_0/B:col10/1732713972168/Put/seqid=0 2024-11-27T13:26:13,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742478_1654 (size=12301) 2024-11-27T13:26:13,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714033598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714033601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714033601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714033607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:13,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714033904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714033909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714033912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:13,927 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/a7e06946d7ac4e8a9903498b463c5e51 2024-11-27T13:26:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b78c1834bca440e6b1d379b77cb087d5 is 50, key is test_row_0/C:col10/1732713972168/Put/seqid=0 2024-11-27T13:26:13,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742479_1655 (size=12301) 2024-11-27T13:26:14,337 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b78c1834bca440e6b1d379b77cb087d5 2024-11-27T13:26:14,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/5dca65f7874242299ef882d82145a863 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863 2024-11-27T13:26:14,345 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863, entries=150, sequenceid=277, filesize=30.5 K 2024-11-27T13:26:14,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/a7e06946d7ac4e8a9903498b463c5e51 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51 2024-11-27T13:26:14,349 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51, entries=150, sequenceid=277, filesize=12.0 K 2024-11-27T13:26:14,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b78c1834bca440e6b1d379b77cb087d5 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5 2024-11-27T13:26:14,352 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5, entries=150, sequenceid=277, filesize=12.0 K 2024-11-27T13:26:14,353 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a5bdef3839c25f6b6634d128aa14c12e in 1295ms, sequenceid=277, compaction requested=false 2024-11-27T13:26:14,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:14,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:14,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-27T13:26:14,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-11-27T13:26:14,355 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-27T13:26:14,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6010 sec 2024-11-27T13:26:14,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 1.6050 sec 2024-11-27T13:26:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:14,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:26:14,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:14,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:14,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272b05d20e669047d1ac47e3add5ba1e15_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742480_1656 (size=14994) 2024-11-27T13:26:14,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714034440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714034441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714034441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714034544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714034544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714034544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714034747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714034747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:14,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714034748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:14,821 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:14,825 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411272b05d20e669047d1ac47e3add5ba1e15_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272b05d20e669047d1ac47e3add5ba1e15_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:14,825 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/240299efb45f4aadbde6c07a4f158150, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:14,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/240299efb45f4aadbde6c07a4f158150 is 175, key is test_row_0/A:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742481_1657 (size=39949) 2024-11-27T13:26:14,830 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/240299efb45f4aadbde6c07a4f158150 2024-11-27T13:26:14,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4e9043e02d00427cb57654975f9d9b81 is 50, key is test_row_0/B:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742482_1658 
(size=12301) 2024-11-27T13:26:14,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4e9043e02d00427cb57654975f9d9b81 2024-11-27T13:26:14,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/8790f083695147d0a55a7e7d78ea17d2 is 50, key is test_row_0/C:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742483_1659 (size=12301) 2024-11-27T13:26:14,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/8790f083695147d0a55a7e7d78ea17d2 2024-11-27T13:26:14,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/240299efb45f4aadbde6c07a4f158150 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150 2024-11-27T13:26:14,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150, entries=200, sequenceid=294, filesize=39.0 K 2024-11-27T13:26:14,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/4e9043e02d00427cb57654975f9d9b81 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81 2024-11-27T13:26:14,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-27T13:26:14,857 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-27T13:26:14,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-11-27T13:26:14,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81, entries=150, sequenceid=294, filesize=12.0 K 2024-11-27T13:26:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-27T13:26:14,860 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:14,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/8790f083695147d0a55a7e7d78ea17d2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2 2024-11-27T13:26:14,860 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:14,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:14,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2, entries=150, sequenceid=294, filesize=12.0 K 2024-11-27T13:26:14,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a5bdef3839c25f6b6634d128aa14c12e in 453ms, sequenceid=294, compaction requested=true 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:14,864 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3
2024-11-27T13:26:14,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-11-27T13:26:14,865 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T13:26:14,865 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102923 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T13:26:14,865 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files)
2024-11-27T13:26:14,865 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.
2024-11-27T13:26:14,866 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=100.5 K
2024-11-27T13:26:14,866 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.
2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150] 2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:14,866 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4b235fccb154ac7a63dcd1b4e078652, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:14,866 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/44854d6de6194bd38bfe4263c72c3c25, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.5 K 2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dca65f7874242299ef882d82145a863, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732713972168 2024-11-27T13:26:14,866 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 44854d6de6194bd38bfe4263c72c3c25, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:14,867 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 240299efb45f4aadbde6c07a4f158150, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973291 2024-11-27T13:26:14,867 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting a7e06946d7ac4e8a9903498b463c5e51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732713972168 2024-11-27T13:26:14,880 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
4e9043e02d00427cb57654975f9d9b81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973295 2024-11-27T13:26:14,886 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:14,889 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:14,889 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/879ad1ee9fd04da7a85a064c004e5e1b is 50, key is test_row_0/B:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,890 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241127b6e43a0c6f0d4bef95defd961760a433_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:14,892 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241127b6e43a0c6f0d4bef95defd961760a433_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:14,892 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127b6e43a0c6f0d4bef95defd961760a433_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:14,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742484_1660 (size=13017) 2024-11-27T13:26:14,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742485_1661 (size=4469) 2024-11-27T13:26:14,898 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#558 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T13:26:14,898 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/879ad1ee9fd04da7a85a064c004e5e1b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/879ad1ee9fd04da7a85a064c004e5e1b
2024-11-27T13:26:14,899 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b911753f6f564433ac217e548b8cc07d is 175, key is test_row_0/A:col10/1732713974410/Put/seqid=0
2024-11-27T13:26:14,904 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 879ad1ee9fd04da7a85a064c004e5e1b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T13:26:14,904 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:14,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742486_1662 (size=31971)
2024-11-27T13:26:14,904 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713974864; duration=0sec
2024-11-27T13:26:14,904 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-27T13:26:14,904 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B
2024-11-27T13:26:14,904 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T13:26:14,905 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T13:26:14,905 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files)
2024-11-27T13:26:14,905 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.
2024-11-27T13:26:14,905 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/d13f6b97c31b4661b6e0d087ee9006e1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.5 K 2024-11-27T13:26:14,905 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting d13f6b97c31b4661b6e0d087ee9006e1, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732713972030 2024-11-27T13:26:14,906 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b78c1834bca440e6b1d379b77cb087d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732713972168 2024-11-27T13:26:14,906 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8790f083695147d0a55a7e7d78ea17d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973295 2024-11-27T13:26:14,914 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#560 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:14,914 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b0dddf2405ca439c878f4d58850d2585 is 50, key is test_row_0/C:col10/1732713974410/Put/seqid=0 2024-11-27T13:26:14,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742487_1663 (size=13017) 2024-11-27T13:26:14,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-27T13:26:15,012 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:15,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-27T13:26:15,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:15,012 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C
2024-11-27T13:26:15,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-27T13:26:15,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e29977dcca3145a29e80348583dad02c_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713974430/Put/seqid=0
2024-11-27T13:26:15,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742488_1664 (size=12454)
2024-11-27T13:26:15,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e
2024-11-27T13:26:15,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing
2024-11-27T13:26:15,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714035062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714035063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714035064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-27T13:26:15,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714035166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714035166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714035167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,308 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b911753f6f564433ac217e548b8cc07d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d 2024-11-27T13:26:15,312 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into b911753f6f564433ac217e548b8cc07d(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:15,312 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:15,312 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713974864; duration=0sec
2024-11-27T13:26:15,312 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T13:26:15,312 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A
2024-11-27T13:26:15,321 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b0dddf2405ca439c878f4d58850d2585 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b0dddf2405ca439c878f4d58850d2585
2024-11-27T13:26:15,325 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into b0dddf2405ca439c878f4d58850d2585(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T13:26:15,325 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e:
2024-11-27T13:26:15,325 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713974864; duration=0sec
2024-11-27T13:26:15,325 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T13:26:15,325 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C
2024-11-27T13:26:15,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714035369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714035369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714035370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:15,426 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e29977dcca3145a29e80348583dad02c_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e29977dcca3145a29e80348583dad02c_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:15,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/146ebdb0dc6e4f3280de01ed4d379b3b, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:15,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/146ebdb0dc6e4f3280de01ed4d379b3b is 175, key is test_row_0/A:col10/1732713974430/Put/seqid=0 2024-11-27T13:26:15,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742489_1665 (size=31255) 2024-11-27T13:26:15,431 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=42.5 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/146ebdb0dc6e4f3280de01ed4d379b3b 2024-11-27T13:26:15,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9f78e6a93dfa456a98d3820f615e6e8e is 50, key is test_row_0/B:col10/1732713974430/Put/seqid=0 2024-11-27T13:26:15,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742490_1666 (size=12301) 2024-11-27T13:26:15,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-27T13:26:15,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714035608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,609 DEBUG [Thread-2537 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:15,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714035672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714035672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:15,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714035673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:15,841 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9f78e6a93dfa456a98d3820f615e6e8e 2024-11-27T13:26:15,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fe16e83043e44247a447d061894d7db9 is 50, key is test_row_0/C:col10/1732713974430/Put/seqid=0 2024-11-27T13:26:15,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742491_1667 (size=12301) 2024-11-27T13:26:15,852 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fe16e83043e44247a447d061894d7db9 2024-11-27T13:26:15,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/146ebdb0dc6e4f3280de01ed4d379b3b as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b 2024-11-27T13:26:15,859 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b, entries=150, sequenceid=317, filesize=30.5 K 2024-11-27T13:26:15,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/9f78e6a93dfa456a98d3820f615e6e8e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e 2024-11-27T13:26:15,863 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e, entries=150, sequenceid=317, filesize=12.0 K 2024-11-27T13:26:15,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fe16e83043e44247a447d061894d7db9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9 2024-11-27T13:26:15,868 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9, entries=150, sequenceid=317, filesize=12.0 K 2024-11-27T13:26:15,868 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a5bdef3839c25f6b6634d128aa14c12e in 856ms, sequenceid=317, compaction requested=false 2024-11-27T13:26:15,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:15,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:15,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-27T13:26:15,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-11-27T13:26:15,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-11-27T13:26:15,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0100 sec 2024-11-27T13:26:15,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 1.0130 sec 2024-11-27T13:26:15,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-27T13:26:15,963 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-27T13:26:15,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:15,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-11-27T13:26:15,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-27T13:26:15,965 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:15,966 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:15,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:16,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-27T13:26:16,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:16,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:16,118 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:16,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:16,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:16,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e0a460d9fd3440a5a2dced0291d9fb5e_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713975061/Put/seqid=0 2024-11-27T13:26:16,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742492_1668 (size=12454) 2024-11-27T13:26:16,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:16,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:16,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714036205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714036206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714036206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-27T13:26:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714036309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714036309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714036309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714036512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714036512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714036512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:16,537 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127e0a460d9fd3440a5a2dced0291d9fb5e_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0a460d9fd3440a5a2dced0291d9fb5e_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d20a6928b6ce4c419711ad6eea73f92e, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d20a6928b6ce4c419711ad6eea73f92e is 175, key is test_row_0/A:col10/1732713975061/Put/seqid=0 2024-11-27T13:26:16,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742493_1669 (size=31255) 2024-11-27T13:26:16,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-27T13:26:16,816 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714036815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714036817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714036817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:16,942 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d20a6928b6ce4c419711ad6eea73f92e 2024-11-27T13:26:16,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/b6f58bbb088948b78dea7ed7f29636ea is 50, key is test_row_0/B:col10/1732713975061/Put/seqid=0 2024-11-27T13:26:16,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742494_1670 (size=12301) 2024-11-27T13:26:16,953 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/b6f58bbb088948b78dea7ed7f29636ea 2024-11-27T13:26:16,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/a97be4083c2c42b9aa94c96ea3827a5b is 50, key is test_row_0/C:col10/1732713975061/Put/seqid=0 2024-11-27T13:26:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742495_1671 (size=12301) 2024-11-27T13:26:16,963 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/a97be4083c2c42b9aa94c96ea3827a5b 2024-11-27T13:26:16,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d20a6928b6ce4c419711ad6eea73f92e as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e 2024-11-27T13:26:16,972 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e, entries=150, sequenceid=333, filesize=30.5 K 2024-11-27T13:26:16,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/b6f58bbb088948b78dea7ed7f29636ea as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea 2024-11-27T13:26:16,977 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea, entries=150, sequenceid=333, filesize=12.0 K 2024-11-27T13:26:16,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/a97be4083c2c42b9aa94c96ea3827a5b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b 2024-11-27T13:26:16,981 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b, entries=150, sequenceid=333, filesize=12.0 K 2024-11-27T13:26:16,981 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for a5bdef3839c25f6b6634d128aa14c12e in 863ms, sequenceid=333, compaction requested=true 2024-11-27T13:26:16,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:16,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:16,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-11-27T13:26:16,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-11-27T13:26:16,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-11-27T13:26:16,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0160 sec 2024-11-27T13:26:16,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 1.0200 sec 2024-11-27T13:26:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-27T13:26:17,068 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-11-27T13:26:17,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:17,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees 2024-11-27T13:26:17,071 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:17,071 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:17,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:17,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-27T13:26:17,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-27T13:26:17,223 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:17,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-11-27T13:26:17,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
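The block above traces the full client-to-master path for an administrative flush: the jenkins client asks the master to flush TestAcidGuarantees, the master stores a FlushTableProcedure (pid=189), fans it out into a FlushRegionProcedure subprocedure (pid=190), dispatches that to the region server, and the client polls "Checking to see if procedure is done" until completion. A hedged sketch of how such a flush is typically issued from client code follows; it uses the public Admin API and the table name from this log, but it is not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Blocks until the master-side FlushTableProcedure (and its per-region
      // FlushRegionProcedure subprocedures) completes, as traced in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}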
2024-11-27T13:26:17,224 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:17,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279403a971ba0a461aa9e79083b9bb8cb7_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713976195/Put/seqid=0 2024-11-27T13:26:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742496_1672 (size=12454) 2024-11-27T13:26:17,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:17,244 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279403a971ba0a461aa9e79083b9bb8cb7_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279403a971ba0a461aa9e79083b9bb8cb7_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:17,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/87b2eb51dcdd42288d0f64ed6ac0a80b, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:17,245 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/87b2eb51dcdd42288d0f64ed6ac0a80b is 175, key is test_row_0/A:col10/1732713976195/Put/seqid=0 2024-11-27T13:26:17,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742497_1673 (size=31255) 2024-11-27T13:26:17,250 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/87b2eb51dcdd42288d0f64ed6ac0a80b 2024-11-27T13:26:17,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/304d4b73162f4ccda39d4f4b52fac1cc is 50, key is test_row_0/B:col10/1732713976195/Put/seqid=0 2024-11-27T13:26:17,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742498_1674 (size=12301) 2024-11-27T13:26:17,262 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/304d4b73162f4ccda39d4f4b52fac1cc 2024-11-27T13:26:17,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/6787217b535748faab4ae45363e6b0b2 is 50, key is test_row_0/C:col10/1732713976195/Put/seqid=0 2024-11-27T13:26:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742499_1675 (size=12301) 2024-11-27T13:26:17,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:17,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:17,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714037332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714037333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714037334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-27T13:26:17,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714037437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714037437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714037438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714037640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714037640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714037641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,671 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/6787217b535748faab4ae45363e6b0b2 2024-11-27T13:26:17,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-27T13:26:17,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/87b2eb51dcdd42288d0f64ed6ac0a80b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b 2024-11-27T13:26:17,679 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b, entries=150, sequenceid=356, filesize=30.5 K 2024-11-27T13:26:17,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/304d4b73162f4ccda39d4f4b52fac1cc as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc 2024-11-27T13:26:17,682 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc, entries=150, sequenceid=356, filesize=12.0 K 2024-11-27T13:26:17,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/6787217b535748faab4ae45363e6b0b2 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2 2024-11-27T13:26:17,687 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2, entries=150, sequenceid=356, filesize=12.0 K 2024-11-27T13:26:17,688 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a5bdef3839c25f6b6634d128aa14c12e in 463ms, sequenceid=356, compaction requested=true 2024-11-27T13:26:17,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:17,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:17,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-11-27T13:26:17,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-11-27T13:26:17,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-11-27T13:26:17,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 618 msec 2024-11-27T13:26:17,691 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees in 621 msec 2024-11-27T13:26:17,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:17,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:17,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:17,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127637d36b7c736496a81792beb54e62783_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:17,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742500_1676 (size=14994) 2024-11-27T13:26:17,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714037967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714037969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:17,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714037970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714038071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714038074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714038074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-27T13:26:18,175 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-11-27T13:26:18,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees 2024-11-27T13:26:18,178 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:18,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:18,179 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:18,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:18,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714038275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:18,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714038277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714038278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:18,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-27T13:26:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
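Two retry paths are visible around this point: the writers keep receiving RegionTooBusyException until the in-flight flush frees memstore space, and the flush procedure itself fails with "Unable to complete flush ... as already flushing" and is simply re-dispatched by the master (pid=192 is executed again just below). A minimal, hedged sketch of the client-side pattern follows; in practice the HBase client already retries RegionTooBusyException internally with backoff, so the explicit loop only illustrates the exception type and the row/family names seen in this log, not behavior the test had to implement.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names mirror the log ("test_row_0", family A, qualifier col10);
      // the value is hypothetical.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the region is below its blocking memstore limit
        } catch (RegionTooBusyException e) {
          // Region is over its blocking memstore limit; pause while the flush drains
          // the memstore, then try again with a doubled backoff.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}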
2024-11-27T13:26:18,358 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:18,361 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127637d36b7c736496a81792beb54e62783_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127637d36b7c736496a81792beb54e62783_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:18,362 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/6d6e07c69b5b42dc809166646774fe41, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/6d6e07c69b5b42dc809166646774fe41 is 175, key is test_row_0/A:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742501_1677 (size=39949) 2024-11-27T13:26:18,367 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=371, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/6d6e07c69b5b42dc809166646774fe41 2024-11-27T13:26:18,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/8955c9af839e41d1a6fc083ce2f2bc9f is 50, key is test_row_0/B:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742502_1678 (size=12301) 2024-11-27T13:26:18,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:18,483 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:18,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-27T13:26:18,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:18,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:18,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T13:26:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714038577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714038582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714038582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:18,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:18,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-27T13:26:18,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:18,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:18,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/8955c9af839e41d1a6fc083ce2f2bc9f 2024-11-27T13:26:18,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac is 50, key is test_row_0/C:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:18,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-27T13:26:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:18,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:18,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742503_1679 (size=12301) 2024-11-27T13:26:18,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac 2024-11-27T13:26:18,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/6d6e07c69b5b42dc809166646774fe41 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41 2024-11-27T13:26:18,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41, entries=200, sequenceid=371, filesize=39.0 K 2024-11-27T13:26:18,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/8955c9af839e41d1a6fc083ce2f2bc9f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f 2024-11-27T13:26:18,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f, entries=150, sequenceid=371, filesize=12.0 K 2024-11-27T13:26:18,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac 2024-11-27T13:26:18,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac, entries=150, sequenceid=371, filesize=12.0 K 2024-11-27T13:26:18,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a5bdef3839c25f6b6634d128aa14c12e in 863ms, sequenceid=371, compaction requested=true 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:18,809 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:18,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:18,809 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 165685 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:18,811 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:26:18,811 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=161.8 K 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:18,811 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,811 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41] 2024-11-27T13:26:18,811 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/879ad1ee9fd04da7a85a064c004e5e1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=60.8 K 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b911753f6f564433ac217e548b8cc07d, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973295 2024-11-27T13:26:18,811 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 879ad1ee9fd04da7a85a064c004e5e1b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973295 2024-11-27T13:26:18,812 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 146ebdb0dc6e4f3280de01ed4d379b3b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713974430 2024-11-27T13:26:18,812 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f78e6a93dfa456a98d3820f615e6e8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713974430 2024-11-27T13:26:18,812 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d20a6928b6ce4c419711ad6eea73f92e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732713975058 2024-11-27T13:26:18,812 DEBUG 
[RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b6f58bbb088948b78dea7ed7f29636ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732713975058 2024-11-27T13:26:18,812 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87b2eb51dcdd42288d0f64ed6ac0a80b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732713976195 2024-11-27T13:26:18,812 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 304d4b73162f4ccda39d4f4b52fac1cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732713976195 2024-11-27T13:26:18,813 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d6e07c69b5b42dc809166646774fe41, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977332 2024-11-27T13:26:18,813 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8955c9af839e41d1a6fc083ce2f2bc9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977333 2024-11-27T13:26:18,822 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,822 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#573 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:18,823 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/313d690a71e940a3a6a0157842af7910 is 50, key is test_row_0/B:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,823 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411279ae594d24d054c98a24d8c91c65c2b3d_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,827 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411279ae594d24d054c98a24d8c91c65c2b3d_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,827 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411279ae594d24d054c98a24d8c91c65c2b3d_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742505_1681 (size=4469) 2024-11-27T13:26:18,831 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#574 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:18,831 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/23998b48a0874112a25e4a2bbdfcb73b is 175, key is test_row_0/A:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742504_1680 (size=13187) 2024-11-27T13:26:18,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742506_1682 (size=32141) 2024-11-27T13:26:18,837 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/313d690a71e940a3a6a0157842af7910 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/313d690a71e940a3a6a0157842af7910 2024-11-27T13:26:18,838 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/23998b48a0874112a25e4a2bbdfcb73b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b 2024-11-27T13:26:18,842 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into 23998b48a0874112a25e4a2bbdfcb73b(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:18,842 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 313d690a71e940a3a6a0157842af7910(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:18,842 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=11, startTime=1732713978809; duration=0sec 2024-11-27T13:26:18,842 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=11, startTime=1732713978809; duration=0sec 2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:18,842 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:18,843 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-27T13:26:18,843 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-27T13:26:18,844 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:18,844 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:18,844 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b0dddf2405ca439c878f4d58850d2585, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=60.8 K 2024-11-27T13:26:18,845 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0dddf2405ca439c878f4d58850d2585, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732713973295 2024-11-27T13:26:18,845 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe16e83043e44247a447d061894d7db9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732713974430 2024-11-27T13:26:18,845 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a97be4083c2c42b9aa94c96ea3827a5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732713975058 2024-11-27T13:26:18,845 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6787217b535748faab4ae45363e6b0b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732713976195 2024-11-27T13:26:18,846 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1f7ddae2ba649b7aef6ba5b2d3d72ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977333 2024-11-27T13:26:18,853 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#575 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:18,855 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fa5c4aa979ee452f9f4624ed5722ebf7 is 50, key is test_row_0/C:col10/1732713977333/Put/seqid=0 2024-11-27T13:26:18,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742507_1683 (size=13187) 2024-11-27T13:26:18,863 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/fa5c4aa979ee452f9f4624ed5722ebf7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fa5c4aa979ee452f9f4624ed5722ebf7 2024-11-27T13:26:18,868 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into fa5c4aa979ee452f9f4624ed5722ebf7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:18,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:18,868 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=11, startTime=1732713978809; duration=0sec 2024-11-27T13:26:18,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:18,868 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:18,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:18,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-27T13:26:18,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
2024-11-27T13:26:18,943 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:18,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:18,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f74a234dd8da480780fcf4bb40f53813_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713977968/Put/seqid=0 2024-11-27T13:26:18,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742508_1684 (size=12454) 2024-11-27T13:26:18,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:18,957 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127f74a234dd8da480780fcf4bb40f53813_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f74a234dd8da480780fcf4bb40f53813_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:18,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/7c4de3a48cad46c2a62fe4697c19d74c, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:18,958 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/7c4de3a48cad46c2a62fe4697c19d74c is 175, key is test_row_0/A:col10/1732713977968/Put/seqid=0 2024-11-27T13:26:18,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742509_1685 (size=31255) 2024-11-27T13:26:18,969 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/7c4de3a48cad46c2a62fe4697c19d74c 2024-11-27T13:26:18,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/773a736d7211477e9d7adaf2baf4370c is 50, key is test_row_0/B:col10/1732713977968/Put/seqid=0 2024-11-27T13:26:18,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742510_1686 (size=12301) 2024-11-27T13:26:19,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:19,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714039094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714039095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714039095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36266 deadline: 1732714039165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,167 DEBUG [Thread-2541 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18206 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:19,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714039197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714039199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714039199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:19,387 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/773a736d7211477e9d7adaf2baf4370c 2024-11-27T13:26:19,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/9775317150394ae08a8987dfd626521b is 50, key is test_row_0/C:col10/1732713977968/Put/seqid=0 2024-11-27T13:26:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742511_1687 (size=12301) 2024-11-27T13:26:19,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714039399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714039402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714039403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36296 deadline: 1732714039630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,632 DEBUG [Thread-2537 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., hostname=a0541979a851,32819,1732713812705, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T13:26:19,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714039703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714039704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714039705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:19,798 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/9775317150394ae08a8987dfd626521b 2024-11-27T13:26:19,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/7c4de3a48cad46c2a62fe4697c19d74c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c 2024-11-27T13:26:19,805 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c, entries=150, sequenceid=395, filesize=30.5 K 2024-11-27T13:26:19,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/773a736d7211477e9d7adaf2baf4370c as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c 2024-11-27T13:26:19,809 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T13:26:19,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/9775317150394ae08a8987dfd626521b as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b 2024-11-27T13:26:19,813 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b, entries=150, sequenceid=395, filesize=12.0 K 2024-11-27T13:26:19,814 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a5bdef3839c25f6b6634d128aa14c12e in 872ms, sequenceid=395, compaction requested=false 2024-11-27T13:26:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
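The RegionTooBusyException retries recorded throughout this stretch ("Call exception, tries=8, retries=16 ..." above) come from the blocking client's RpcRetryingCallerImpl, whose budget is controlled by client-side configuration. The sketch below shows those standard knobs with illustrative values; apart from the table, row, family, and column names, nothing is taken from this run.

// Illustrative sketch only: a blocking-client put with explicit retry tuning.
// The property keys are the stock HBase client settings; the numeric values
// are examples, not the ones used by this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTunedPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget; the log's "retries=16" is this setting on the test client.
    conf.setInt("hbase.client.retries.number", 16);
    // Base pause between retries; the caller backs off from this value.
    conf.setLong("hbase.client.pause", 100);
    // Overall per-operation deadline after which the caller gives up.
    conf.setInt("hbase.client.operation.timeout", 120000);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put() retries internally (via RpcRetryingCallerImpl) on retryable
      // exceptions such as RegionTooBusyException until the budget is exhausted.
      table.put(put);
    }
  }
}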
2024-11-27T13:26:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=192 2024-11-27T13:26:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=192 2024-11-27T13:26:19,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-11-27T13:26:19,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6370 sec 2024-11-27T13:26:19,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees in 1.6410 sec 2024-11-27T13:26:20,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:20,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:20,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:20,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278aae2d41e8c94efd99fdd5d23f430cae_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:20,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742512_1688 (size=12454) 2024-11-27T13:26:20,219 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:20,222 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411278aae2d41e8c94efd99fdd5d23f430cae_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278aae2d41e8c94efd99fdd5d23f430cae_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:20,225 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/14967140b57a474cb8c9168bbcc5026a, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:20,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/14967140b57a474cb8c9168bbcc5026a is 175, key is test_row_0/A:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:20,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714040224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714040225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714040226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742513_1689 (size=31255) 2024-11-27T13:26:20,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-27T13:26:20,282 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 191 completed 2024-11-27T13:26:20,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-27T13:26:20,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=193, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees 2024-11-27T13:26:20,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:20,285 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=193, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-27T13:26:20,285 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=193, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-27T13:26:20,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=194, ppid=193, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-27T13:26:20,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714040329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714040329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714040329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:20,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:20,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:20,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:20,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714040531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714040531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714040532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:20,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:20,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:20,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:20,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,631 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=412, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/14967140b57a474cb8c9168bbcc5026a 2024-11-27T13:26:20,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/93c73f9b254248cb8543202fdbf23c71 is 50, key is test_row_0/B:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:20,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742514_1690 (size=12301) 2024-11-27T13:26:20,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/93c73f9b254248cb8543202fdbf23c71 2024-11-27T13:26:20,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b6a96d800822468da1b3e8c3681b0f77 is 50, key is test_row_0/C:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:20,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742515_1691 (size=12301) 2024-11-27T13:26:20,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:20,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:20,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
as already flushing 2024-11-27T13:26:20,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36302 deadline: 1732714040835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36300 deadline: 1732714040835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-27T13:26:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36284 deadline: 1732714040836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 2024-11-27T13:26:20,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:20,895 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:20,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:20,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:20,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:20,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-2 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:20,930 DEBUG [Thread-2550 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:59011 2024-11-27T13:26:20,930 DEBUG [Thread-2550 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:20,930 DEBUG [Thread-2552 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:59011 2024-11-27T13:26:20,931 DEBUG [Thread-2552 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:20,931 DEBUG [Thread-2546 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:59011 2024-11-27T13:26:20,931 DEBUG [Thread-2546 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:20,933 DEBUG [Thread-2554 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:59011 2024-11-27T13:26:20,933 DEBUG [Thread-2554 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:20,933 DEBUG [Thread-2548 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:59011 2024-11-27T13:26:20,933 DEBUG [Thread-2548 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:21,048 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:21,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. as already flushing 2024-11-27T13:26:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] handler.RSProcedureHandler(58): pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:21,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-0 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=194 java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:21,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4114): Remote procedure failed, pid=194 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T13:26:21,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b6a96d800822468da1b3e8c3681b0f77 2024-11-27T13:26:21,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/14967140b57a474cb8c9168bbcc5026a as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a 2024-11-27T13:26:21,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a, entries=150, sequenceid=412, filesize=30.5 K 2024-11-27T13:26:21,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/93c73f9b254248cb8543202fdbf23c71 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71 2024-11-27T13:26:21,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71, entries=150, 
sequenceid=412, filesize=12.0 K 2024-11-27T13:26:21,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/b6a96d800822468da1b3e8c3681b0f77 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77 2024-11-27T13:26:21,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77, entries=150, sequenceid=412, filesize=12.0 K 2024-11-27T13:26:21,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a5bdef3839c25f6b6634d128aa14c12e in 857ms, sequenceid=412, compaction requested=true 2024-11-27T13:26:21,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:A, priority=-2147483648, current under compaction store size is 1 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:B, priority=-2147483648, current under compaction store size is 2 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a5bdef3839c25f6b6634d128aa14c12e:C, priority=-2147483648, current under compaction store size is 3 2024-11-27T13:26:21,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] 
regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/A is initiating minor compaction (all files) 2024-11-27T13:26:21,066 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/B is initiating minor compaction (all files) 2024-11-27T13:26:21,066 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/B in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,066 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/A in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,066 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=92.4 K 2024-11-27T13:26:21,066 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/313d690a71e940a3a6a0157842af7910, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.9 K 2024-11-27T13:26:21,066 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
files: [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a] 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 313d690a71e940a3a6a0157842af7910, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977333 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23998b48a0874112a25e4a2bbdfcb73b, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977333 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 773a736d7211477e9d7adaf2baf4370c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732713977963 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c4de3a48cad46c2a62fe4697c19d74c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732713977963 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 93c73f9b254248cb8543202fdbf23c71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732713979094 2024-11-27T13:26:21,067 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14967140b57a474cb8c9168bbcc5026a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732713979094 2024-11-27T13:26:21,072 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:21,072 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#B#compaction#582 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:21,073 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/40e7660452f6497ea47961444771ebdf is 50, key is test_row_0/B:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:21,073 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112758c29c3b152e454a8b9d0202fa1f05af_a5bdef3839c25f6b6634d128aa14c12e store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:21,076 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112758c29c3b152e454a8b9d0202fa1f05af_a5bdef3839c25f6b6634d128aa14c12e, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:21,076 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112758c29c3b152e454a8b9d0202fa1f05af_a5bdef3839c25f6b6634d128aa14c12e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:21,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742516_1692 (size=13289) 2024-11-27T13:26:21,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742517_1693 (size=4469) 2024-11-27T13:26:21,200 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:21,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=194 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
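
The pid=194 entries above show a table flush that first failed with "Unable to complete flush", was reported back to the master, and is now being re-dispatched to the region server, where it eventually succeeds. The client-side analogue of this flow is a plain Admin.flush call retried on IOException. The snippet below is only a sketch of that pattern; it assumes an hbase-client dependency and a reachable cluster, and the three-attempt backoff is an illustrative choice, not part of the test tool.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      int attempts = 0;
      while (true) {
        try {
          // Submits a flush for the table and waits for the procedure to finish.
          admin.flush(table);
          break;
        } catch (IOException e) {
          // e.g. "Unable to complete flush" while a region is already flushing.
          if (++attempts >= 3) throw e;
          Thread.sleep(1000L * attempts);
        }
      }
    }
  }
}
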
2024-11-27T13:26:21,201 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:21,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:21,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112785bf1d9e065047feb8fcc5f422cb7d39_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713980223/Put/seqid=0 2024-11-27T13:26:21,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742518_1694 (size=12454) 2024-11-27T13:26:21,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32819 {}] regionserver.HRegion(8581): Flush requested on a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:21,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
as already flushing 2024-11-27T13:26:21,339 DEBUG [Thread-2543 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:59011 2024-11-27T13:26:21,339 DEBUG [Thread-2543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:21,340 DEBUG [Thread-2539 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:59011 2024-11-27T13:26:21,340 DEBUG [Thread-2539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:21,340 DEBUG [Thread-2535 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:59011 2024-11-27T13:26:21,340 DEBUG [Thread-2535 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:21,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:21,480 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#A#compaction#583 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:21,481 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b347aa832ff94e808f362ec3fdbbc9c8 is 175, key is test_row_0/A:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:21,482 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/40e7660452f6497ea47961444771ebdf as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/40e7660452f6497ea47961444771ebdf 2024-11-27T13:26:21,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742519_1695 (size=32243) 2024-11-27T13:26:21,486 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/B of a5bdef3839c25f6b6634d128aa14c12e into 40e7660452f6497ea47961444771ebdf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
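
The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above reflect a ratio-based eligibility rule: a candidate set is considered "in ratio" when no single file is larger than the configured ratio times the combined size of the other files in the set. The sketch below illustrates only that check; it is a simplification, not the actual ExploringCompactionPolicy code (which also enforces min/max file counts, off-peak ratios and size limits), and the 1.2 ratio and file sizes are assumptions taken loosely from the log.

public final class CompactionRatioCheck {
  /** true if every file is no larger than ratio * (sum of the other files' sizes). */
  static boolean inRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long s : fileSizes) total += s;
    for (long s : fileSizes) {
      if (s > ratio * (total - s)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the three A-family files selected above (~92.4 K total).
    long[] candidate = {32100, 31200, 31351};
    System.out.println(inRatio(candidate, 1.2));
  }
}
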
2024-11-27T13:26:21,486 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:21,486 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/B, priority=13, startTime=1732713981066; duration=0sec 2024-11-27T13:26:21,486 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-27T13:26:21,486 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:B 2024-11-27T13:26:21,487 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T13:26:21,487 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T13:26:21,487 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1540): a5bdef3839c25f6b6634d128aa14c12e/C is initiating minor compaction (all files) 2024-11-27T13:26:21,487 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a5bdef3839c25f6b6634d128aa14c12e/C in TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:21,487 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fa5c4aa979ee452f9f4624ed5722ebf7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77] into tmpdir=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp, totalSize=36.9 K 2024-11-27T13:26:21,487 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/b347aa832ff94e808f362ec3fdbbc9c8 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b347aa832ff94e808f362ec3fdbbc9c8 2024-11-27T13:26:21,488 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting fa5c4aa979ee452f9f4624ed5722ebf7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732713977333 2024-11-27T13:26:21,488 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9775317150394ae08a8987dfd626521b, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732713977963 2024-11-27T13:26:21,488 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] compactions.Compactor(224): Compacting b6a96d800822468da1b3e8c3681b0f77, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732713979094 2024-11-27T13:26:21,491 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/A of a5bdef3839c25f6b6634d128aa14c12e into b347aa832ff94e808f362ec3fdbbc9c8(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T13:26:21,491 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:21,491 INFO [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/A, priority=13, startTime=1732713981065; duration=0sec 2024-11-27T13:26:21,491 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:21,491 DEBUG [RS:0;a0541979a851:32819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:A 2024-11-27T13:26:21,493 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a5bdef3839c25f6b6634d128aa14c12e#C#compaction#585 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T13:26:21,493 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/0f7ef2f38b2440468f196f44b946eb93 is 50, key is test_row_0/C:col10/1732713979094/Put/seqid=0 2024-11-27T13:26:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742520_1696 (size=13289) 2024-11-27T13:26:21,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:21,613 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112785bf1d9e065047feb8fcc5f422cb7d39_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112785bf1d9e065047feb8fcc5f422cb7d39_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:21,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d0f591c9c52547d48347ba517bd1a75d, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:21,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d0f591c9c52547d48347ba517bd1a75d is 175, key is test_row_0/A:col10/1732713980223/Put/seqid=0 2024-11-27T13:26:21,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742521_1697 (size=31255) 2024-11-27T13:26:21,900 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/0f7ef2f38b2440468f196f44b946eb93 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0f7ef2f38b2440468f196f44b946eb93 2024-11-27T13:26:21,903 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a5bdef3839c25f6b6634d128aa14c12e/C of a5bdef3839c25f6b6634d128aa14c12e into 0f7ef2f38b2440468f196f44b946eb93(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
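
The PressureAwareThroughputController lines ("average throughput is ... MB/second", "total limit is 50.00 MB/second", "slept 0 time(s)") show compaction writes being metered against a bytes-per-second budget. The sketch below conveys the general idea of such a limiter, sleeping whenever cumulative writes run ahead of the budget; it is a toy model and not the HBase class, which additionally scales its limit with memstore/store pressure.

public final class SimpleThroughputLimiter {
  private final double bytesPerSecond;
  private long written;
  private final long start = System.nanoTime();

  SimpleThroughputLimiter(double mbPerSecond) {
    this.bytesPerSecond = mbPerSecond * 1024 * 1024;
  }

  /** Call after writing a chunk; sleeps if writes are ahead of the budget. */
  void control(long bytesJustWritten) throws InterruptedException {
    written += bytesJustWritten;
    double elapsedSec = (System.nanoTime() - start) / 1e9;
    double expectedSec = written / bytesPerSecond;
    long sleepMs = (long) ((expectedSec - elapsedSec) * 1000);
    if (sleepMs > 0) Thread.sleep(sleepMs);
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0); // 50 MB/s, as in the log
    for (int i = 0; i < 100; i++) limiter.control(64 * 1024); // pretend to write 64 KB blocks
  }
}
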
2024-11-27T13:26:21,903 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:21,903 INFO [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e., storeName=a5bdef3839c25f6b6634d128aa14c12e/C, priority=13, startTime=1732713981066; duration=0sec 2024-11-27T13:26:21,903 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T13:26:21,903 DEBUG [RS:0;a0541979a851:32819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a5bdef3839c25f6b6634d128aa14c12e:C 2024-11-27T13:26:22,017 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=431, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d0f591c9c52547d48347ba517bd1a75d 2024-11-27T13:26:22,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/50300c7f23b049e5a7d883512201f327 is 50, key is test_row_0/B:col10/1732713980223/Put/seqid=0 2024-11-27T13:26:22,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742522_1698 (size=12301) 2024-11-27T13:26:22,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:22,426 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/50300c7f23b049e5a7d883512201f327 2024-11-27T13:26:22,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/0795c8996d5343288a8eed4ee6bbc41c is 50, key is test_row_0/C:col10/1732713980223/Put/seqid=0 2024-11-27T13:26:22,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742523_1699 (size=12301) 2024-11-27T13:26:22,834 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/0795c8996d5343288a8eed4ee6bbc41c 
2024-11-27T13:26:22,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/d0f591c9c52547d48347ba517bd1a75d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d0f591c9c52547d48347ba517bd1a75d 2024-11-27T13:26:22,840 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d0f591c9c52547d48347ba517bd1a75d, entries=150, sequenceid=431, filesize=30.5 K 2024-11-27T13:26:22,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/50300c7f23b049e5a7d883512201f327 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/50300c7f23b049e5a7d883512201f327 2024-11-27T13:26:22,843 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/50300c7f23b049e5a7d883512201f327, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T13:26:22,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/0795c8996d5343288a8eed4ee6bbc41c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0795c8996d5343288a8eed4ee6bbc41c 2024-11-27T13:26:22,846 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0795c8996d5343288a8eed4ee6bbc41c, entries=150, sequenceid=431, filesize=12.0 K 2024-11-27T13:26:22,846 INFO [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=20.13 KB/20610 for a5bdef3839c25f6b6634d128aa14c12e in 1645ms, sequenceid=431, compaction requested=false 2024-11-27T13:26:22,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.HRegion(2538): Flush status journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:22,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.FlushRegionCallable(64): Closing region 
operation on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:22,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a0541979a851:0-1 {event_type=RS_FLUSH_REGIONS, pid=194}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=194 2024-11-27T13:26:22,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster(4106): Remote procedure done, pid=194 2024-11-27T13:26:22,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=193 2024-11-27T13:26:22,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=193, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5620 sec 2024-11-27T13:26:22,849 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=193, table=TestAcidGuarantees in 2.5650 sec 2024-11-27T13:26:24,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-27T13:26:24,389 INFO [Thread-2545 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 193 completed 2024-11-27T13:26:29,250 DEBUG [Thread-2541 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:59011 2024-11-27T13:26:29,250 DEBUG [Thread-2541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:29,637 DEBUG [Thread-2537 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:59011 2024-11-27T13:26:29,637 DEBUG [Thread-2537 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6568 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6159 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6191 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6566 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6189 2024-11-27T13:26:29,637 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-27T13:26:29,637 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:26:29,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:59011 2024-11-27T13:26:29,637 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:29,638 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-27T13:26:29,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-27T13:26:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=195, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:29,640 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713989640"}]},"ts":"1732713989640"} 2024-11-27T13:26:29,641 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-27T13:26:29,643 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-27T13:26:29,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-27T13:26:29,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=197, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, UNASSIGN}] 2024-11-27T13:26:29,645 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=197, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, UNASSIGN 2024-11-27T13:26:29,645 INFO [PEWorker-3 
{}] assignment.RegionStateStore(202): pid=197 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=CLOSING, regionLocation=a0541979a851,32819,1732713812705 2024-11-27T13:26:29,646 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-27T13:26:29,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE; CloseRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705}] 2024-11-27T13:26:29,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:29,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to a0541979a851,32819,1732713812705 2024-11-27T13:26:29,797 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(124): Close a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1681): Closing a5bdef3839c25f6b6634d128aa14c12e, disabling compactions & flushes 2024-11-27T13:26:29,797 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. after waiting 0 ms 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 
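
The DisableTableProcedure, UNASSIGN and CloseRegionProcedure chain above (pids 195 to 198) is driven by the single client call logged earlier ("Client=jenkins ... disable TestAcidGuarantees"). For reference, that call corresponds to the synchronous Admin.disableTable shown below; the snippet assumes an hbase-client dependency and a reachable cluster, and is illustrative rather than part of the test log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Submits a DisableTableProcedure; regions are flushed and closed and the
        // table state moves DISABLING -> DISABLED before the call returns.
        admin.disableTable(table);
      }
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}
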
2024-11-27T13:26:29,797 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(2837): Flushing a5bdef3839c25f6b6634d128aa14c12e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-27T13:26:29,797 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=A 2024-11-27T13:26:29,798 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:29,798 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=B 2024-11-27T13:26:29,798 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:29,798 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a5bdef3839c25f6b6634d128aa14c12e, store=C 2024-11-27T13:26:29,798 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-27T13:26:29,802 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127bb8907790563459d89d34984732a161d_a5bdef3839c25f6b6634d128aa14c12e is 50, key is test_row_0/A:col10/1732713989635/Put/seqid=0 2024-11-27T13:26:29,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742524_1700 (size=12454) 2024-11-27T13:26:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:30,206 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T13:26:30,209 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241127bb8907790563459d89d34984732a161d_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bb8907790563459d89d34984732a161d_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:30,210 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/9505361df6d241948253811359d80e0f, store: [table=TestAcidGuarantees family=A region=a5bdef3839c25f6b6634d128aa14c12e] 2024-11-27T13:26:30,210 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/9505361df6d241948253811359d80e0f is 175, key is test_row_0/A:col10/1732713989635/Put/seqid=0 2024-11-27T13:26:30,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742525_1701 (size=31255) 2024-11-27T13:26:30,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:30,614 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=442, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/9505361df6d241948253811359d80e0f 2024-11-27T13:26:30,619 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5d05c386a8c94a9b893b1f05aa3dc53d is 50, key is test_row_0/B:col10/1732713989635/Put/seqid=0 2024-11-27T13:26:30,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742526_1702 (size=12301) 2024-11-27T13:26:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:31,022 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5d05c386a8c94a9b893b1f05aa3dc53d 2024-11-27T13:26:31,028 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/27950a8dda424547a1d82a05a30fe551 is 50, key is test_row_0/C:col10/1732713989635/Put/seqid=0 2024-11-27T13:26:31,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742527_1703 (size=12301) 2024-11-27T13:26:31,053 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
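
The mobdir/.tmp, DefaultMobStoreFlusher and HMobStore entries appear because column family A is MOB-enabled in this run, so qualifying cells are flushed into separate MOB files before the regular store file is committed. The sketch below shows how a MOB family is typically declared with the 2.x descriptor builders; the 100 KB threshold is an illustrative assumption, not necessarily the value used by this test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Cells larger than the MOB threshold are written to MOB files under mobdir/,
    // which is why the flush above renames a mob file before committing the
    // ordinary store file for family A.
    ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024)
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(a)
        .build();
    System.out.println(table);
  }
}
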
2024-11-27T13:26:31,431 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=442 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/27950a8dda424547a1d82a05a30fe551 2024-11-27T13:26:31,434 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/A/9505361df6d241948253811359d80e0f as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/9505361df6d241948253811359d80e0f 2024-11-27T13:26:31,437 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/9505361df6d241948253811359d80e0f, entries=150, sequenceid=442, filesize=30.5 K 2024-11-27T13:26:31,437 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/B/5d05c386a8c94a9b893b1f05aa3dc53d as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5d05c386a8c94a9b893b1f05aa3dc53d 2024-11-27T13:26:31,439 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5d05c386a8c94a9b893b1f05aa3dc53d, entries=150, sequenceid=442, filesize=12.0 K 2024-11-27T13:26:31,440 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/.tmp/C/27950a8dda424547a1d82a05a30fe551 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/27950a8dda424547a1d82a05a30fe551 2024-11-27T13:26:31,442 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/27950a8dda424547a1d82a05a30fe551, entries=150, sequenceid=442, filesize=12.0 K 2024-11-27T13:26:31,443 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a5bdef3839c25f6b6634d128aa14c12e in 1646ms, sequenceid=442, compaction requested=true 
2024-11-27T13:26:31,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a] to archive 2024-11-27T13:26:31,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
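
The HFileArchiver entries that follow move each compacted-away store file from the table's data directory to the mirror location under archive/. The sketch below illustrates that data-to-archive path mapping with the Hadoop FileSystem API; the root directory and file name are placeholders, and the real archiver additionally handles name collisions and retries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveStoreFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path root = new Path("/hbase");
    Path dataFile = new Path(root, "data/default/TestAcidGuarantees/region/A/storefile");
    // Rebuild the same relative path under the archive root, then move the file there.
    String relative = dataFile.toString().substring(new Path(root, "data").toString().length() + 1);
    Path archived = new Path(new Path(root, "archive/data"), relative);
    fs.mkdirs(archived.getParent());
    if (!fs.rename(dataFile, archived)) {
      throw new java.io.IOException("archive failed for " + dataFile);
    }
  }
}
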
2024-11-27T13:26:31,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/20d622612aee4ba688b7fd28638c915f 2024-11-27T13:26:31,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/13a72d2ba4584301b814984ce224654f 2024-11-27T13:26:31,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/bea466d98d3440a0832df43701bbb117 2024-11-27T13:26:31,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5b2970d7713e439c8a3e3546492eea47 2024-11-27T13:26:31,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e2ff89c33652428792c90c6aca736085 2024-11-27T13:26:31,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/e1d007dae82349f8ba1dc9cd0214a3ae 2024-11-27T13:26:31,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/8f52fc1a08db47c4a623c6727859efba 2024-11-27T13:26:31,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5ac8690c80dd47b480b12d2d1c6a5f33 2024-11-27T13:26:31,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ab95cc41578e45928a1c9d2d184f0acd 2024-11-27T13:26:31,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/baac7da93b5c4a92aa49e454001ebbfe 2024-11-27T13:26:31,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/ce10c9c2c4d849d087c0541eded064df 2024-11-27T13:26:31,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/3c1cb5762d6f48f49927713d59235fba 2024-11-27T13:26:31,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/0ff1f282ca514737a025e74da87e040a 2024-11-27T13:26:31,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/1fa55ee469fa4d3dbd366832eee7f1c3 2024-11-27T13:26:31,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/813f53d4edf64a4a8f49c8ffe361388d 2024-11-27T13:26:31,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b7f989c9bb474093975f046d50146797 2024-11-27T13:26:31,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/dcfa1bdfa03a4fec887407b12cc37baa 2024-11-27T13:26:31,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/f4b235fccb154ac7a63dcd1b4e078652 2024-11-27T13:26:31,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/c2f366e57ce8443694a35726fb6a0e20 2024-11-27T13:26:31,459 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/5dca65f7874242299ef882d82145a863 2024-11-27T13:26:31,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/240299efb45f4aadbde6c07a4f158150 2024-11-27T13:26:31,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b911753f6f564433ac217e548b8cc07d 2024-11-27T13:26:31,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/146ebdb0dc6e4f3280de01ed4d379b3b 2024-11-27T13:26:31,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d20a6928b6ce4c419711ad6eea73f92e 2024-11-27T13:26:31,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/87b2eb51dcdd42288d0f64ed6ac0a80b 2024-11-27T13:26:31,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/6d6e07c69b5b42dc809166646774fe41 2024-11-27T13:26:31,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/23998b48a0874112a25e4a2bbdfcb73b 2024-11-27T13:26:31,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/7c4de3a48cad46c2a62fe4697c19d74c 2024-11-27T13:26:31,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/14967140b57a474cb8c9168bbcc5026a 2024-11-27T13:26:31,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/f399f79cbb1e496c85c63d639d2e9557, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8524323c66584aac90a739f51904ebbb, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c6a12cb23d35426d88f947bf7edc4666, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b8c0734f812843509189e876fe647a89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/00ea2f8dd9ac48d5ad785f860e83dc37, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/44854d6de6194bd38bfe4263c72c3c25, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/879ad1ee9fd04da7a85a064c004e5e1b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/313d690a71e940a3a6a0157842af7910, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71] to archive 2024-11-27T13:26:31,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:26:31,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/59b470302d5241df8116a67e8c4c9f1f 2024-11-27T13:26:31,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9d772179952f4180bd79f955711ba406 2024-11-27T13:26:31,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/f399f79cbb1e496c85c63d639d2e9557 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/f399f79cbb1e496c85c63d639d2e9557 2024-11-27T13:26:31,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/fc3693937fb14469aca07e3ad92afdcf 2024-11-27T13:26:31,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c3557fd170ed4c66a273d60955c0eb8f 2024-11-27T13:26:31,471 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8524323c66584aac90a739f51904ebbb to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8524323c66584aac90a739f51904ebbb 2024-11-27T13:26:31,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/04ffc17355544e7c8fc54cb72abf4e83 2024-11-27T13:26:31,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/625eadfe4b514c678298c526af436bac 2024-11-27T13:26:31,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c6a12cb23d35426d88f947bf7edc4666 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/c6a12cb23d35426d88f947bf7edc4666 2024-11-27T13:26:31,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/1e6a21f3d2e1435fb8855e887cd36ef4 2024-11-27T13:26:31,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9cf7a30f63bf41dea6971da47f1a6e5e 2024-11-27T13:26:31,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b8c0734f812843509189e876fe647a89 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b8c0734f812843509189e876fe647a89 2024-11-27T13:26:31,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4b924c4d65d14ca1b09e167212a29621 2024-11-27T13:26:31,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5cef3c37a70444d18e1b2a581965f160 2024-11-27T13:26:31,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/00ea2f8dd9ac48d5ad785f860e83dc37 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/00ea2f8dd9ac48d5ad785f860e83dc37 2024-11-27T13:26:31,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/2c2fc7c340b0471ea8456bad009d68c1 2024-11-27T13:26:31,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/3877a1bbe86146b58f588e03ea5323f9 2024-11-27T13:26:31,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/44854d6de6194bd38bfe4263c72c3c25 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/44854d6de6194bd38bfe4263c72c3c25 2024-11-27T13:26:31,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/0a43c787500c43b29ba691fdce6da25b 2024-11-27T13:26:31,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/a7e06946d7ac4e8a9903498b463c5e51 2024-11-27T13:26:31,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/879ad1ee9fd04da7a85a064c004e5e1b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/879ad1ee9fd04da7a85a064c004e5e1b 2024-11-27T13:26:31,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/4e9043e02d00427cb57654975f9d9b81 2024-11-27T13:26:31,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/9f78e6a93dfa456a98d3820f615e6e8e 2024-11-27T13:26:31,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/b6f58bbb088948b78dea7ed7f29636ea 2024-11-27T13:26:31,484 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/304d4b73162f4ccda39d4f4b52fac1cc 2024-11-27T13:26:31,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/313d690a71e940a3a6a0157842af7910 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/313d690a71e940a3a6a0157842af7910 2024-11-27T13:26:31,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/8955c9af839e41d1a6fc083ce2f2bc9f 2024-11-27T13:26:31,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/773a736d7211477e9d7adaf2baf4370c 2024-11-27T13:26:31,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/93c73f9b254248cb8543202fdbf23c71 2024-11-27T13:26:31,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/814ecbdd831245619023713430f36003, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/dfaddec3ac2b4983935d49d37ed235ff, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6705d6fe9b3d49178350fa5cc2dd0a85, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/2368ce67db3c4da7bc64d3494896a29a, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/d13f6b97c31b4661b6e0d087ee9006e1, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b0dddf2405ca439c878f4d58850d2585, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fa5c4aa979ee452f9f4624ed5722ebf7, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77] to archive 2024-11-27T13:26:31,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T13:26:31,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/33f69d7cfc9543d593c0c8c3953fbe05 2024-11-27T13:26:31,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/68bde37ea3294453aa4e5127dcc3e612 2024-11-27T13:26:31,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/60c5bd483ccf4f11a4e7a4f49d3c0bfe 2024-11-27T13:26:31,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/97cd6f8e97e94a4f866ab5015f8af0b1 2024-11-27T13:26:31,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/72a202af7be940b6b836d78a33c48d06 2024-11-27T13:26:31,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/814ecbdd831245619023713430f36003 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/814ecbdd831245619023713430f36003 2024-11-27T13:26:31,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/cc771dd61337416d9ac168eab90383f4 2024-11-27T13:26:31,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/3b1443c4e8c4431d9618c9e5db033fc9 2024-11-27T13:26:31,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/dfaddec3ac2b4983935d49d37ed235ff to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/dfaddec3ac2b4983935d49d37ed235ff 2024-11-27T13:26:31,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/105e02d0200a4faaaf93a05b58577995 2024-11-27T13:26:31,498 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fcf186c5903642a98862acc931445b57 2024-11-27T13:26:31,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6705d6fe9b3d49178350fa5cc2dd0a85 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6705d6fe9b3d49178350fa5cc2dd0a85 2024-11-27T13:26:31,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/46392fd8b16d465b89d99388a545bbe4 2024-11-27T13:26:31,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/baf521ad65b04260babe4cf894418382 2024-11-27T13:26:31,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/2368ce67db3c4da7bc64d3494896a29a to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/2368ce67db3c4da7bc64d3494896a29a 2024-11-27T13:26:31,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/83cb18d099514a9fbd09a9aeff90fad0 2024-11-27T13:26:31,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89 2024-11-27T13:26:31,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/d13f6b97c31b4661b6e0d087ee9006e1 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/d13f6b97c31b4661b6e0d087ee9006e1 2024-11-27T13:26:31,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/5f98a87690384d489469f5e11521abf8 2024-11-27T13:26:31,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b78c1834bca440e6b1d379b77cb087d5 2024-11-27T13:26:31,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b0dddf2405ca439c878f4d58850d2585 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b0dddf2405ca439c878f4d58850d2585 2024-11-27T13:26:31,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/8790f083695147d0a55a7e7d78ea17d2 2024-11-27T13:26:31,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9 to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fe16e83043e44247a447d061894d7db9 2024-11-27T13:26:31,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/a97be4083c2c42b9aa94c96ea3827a5b 2024-11-27T13:26:31,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/6787217b535748faab4ae45363e6b0b2 2024-11-27T13:26:31,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fa5c4aa979ee452f9f4624ed5722ebf7 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/fa5c4aa979ee452f9f4624ed5722ebf7 2024-11-27T13:26:31,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/e1f7ddae2ba649b7aef6ba5b2d3d72ac 2024-11-27T13:26:31,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/9775317150394ae08a8987dfd626521b 2024-11-27T13:26:31,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/b6a96d800822468da1b3e8c3681b0f77 2024-11-27T13:26:31,517 DEBUG 
[RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits/445.seqid, newMaxSeqId=445, maxSeqId=4 2024-11-27T13:26:31,518 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e. 2024-11-27T13:26:31,518 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] regionserver.HRegion(1635): Region close journal for a5bdef3839c25f6b6634d128aa14c12e: 2024-11-27T13:26:31,519 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION, pid=198}] handler.UnassignRegionHandler(170): Closed a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,519 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34317 {}] assignment.AssignmentManager(1518): Reporting a0541979a851,32819,1732713812705 state does not match state=CLOSED, location=null, table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e (time since last update=1873ms) 2024-11-27T13:26:31,519 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=197 updating hbase:meta row=a5bdef3839c25f6b6634d128aa14c12e, regionState=CLOSED 2024-11-27T13:26:31,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=198, resume processing ppid=197 2024-11-27T13:26:31,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, ppid=197, state=SUCCESS; CloseRegionProcedure a5bdef3839c25f6b6634d128aa14c12e, server=a0541979a851,32819,1732713812705 in 1.8740 sec 2024-11-27T13:26:31,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=197, resume processing ppid=196 2024-11-27T13:26:31,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, ppid=196, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a5bdef3839c25f6b6634d128aa14c12e, UNASSIGN in 1.8770 sec 2024-11-27T13:26:31,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=195 2024-11-27T13:26:31,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=195, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8790 sec 2024-11-27T13:26:31,524 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732713991524"}]},"ts":"1732713991524"} 2024-11-27T13:26:31,524 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-27T13:26:31,530 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-27T13:26:31,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8920 sec 2024-11-27T13:26:31,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-11-27T13:26:31,743 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 195 completed 
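
Editor's note: the DISABLE that just completed (procId 195) and the DELETE that follows (procId 199) correspond to ordinary HBase Admin calls made by the test client. A minimal sketch of that client side is below; only the table name is taken from the log, while the class name and the bare configuration setup are illustrative assumptions, not the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml / the test configuration from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);  // runs a DisableTableProcedure, as for pid=195 above
            }
            admin.deleteTable(table);       // runs a DeleteTableProcedure, as for pid=199 below
        }
    }
}
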
2024-11-27T13:26:31,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-27T13:26:31,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] procedure2.ProcedureExecutor(1098): Stored pid=199, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,745 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=199, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=199 2024-11-27T13:26:31,745 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=199, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,746 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,748 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C, FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits] 2024-11-27T13:26:31,750 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/9505361df6d241948253811359d80e0f to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/9505361df6d241948253811359d80e0f 2024-11-27T13:26:31,751 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b347aa832ff94e808f362ec3fdbbc9c8 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/b347aa832ff94e808f362ec3fdbbc9c8 2024-11-27T13:26:31,752 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d0f591c9c52547d48347ba517bd1a75d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/A/d0f591c9c52547d48347ba517bd1a75d 2024-11-27T13:26:31,754 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived 
from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/40e7660452f6497ea47961444771ebdf to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/40e7660452f6497ea47961444771ebdf 2024-11-27T13:26:31,754 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/50300c7f23b049e5a7d883512201f327 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/50300c7f23b049e5a7d883512201f327 2024-11-27T13:26:31,755 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5d05c386a8c94a9b893b1f05aa3dc53d to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/B/5d05c386a8c94a9b893b1f05aa3dc53d 2024-11-27T13:26:31,757 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0795c8996d5343288a8eed4ee6bbc41c to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0795c8996d5343288a8eed4ee6bbc41c 2024-11-27T13:26:31,758 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0f7ef2f38b2440468f196f44b946eb93 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/0f7ef2f38b2440468f196f44b946eb93 2024-11-27T13:26:31,759 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/27950a8dda424547a1d82a05a30fe551 to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/27950a8dda424547a1d82a05a30fe551 2024-11-27T13:26:31,761 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits/445.seqid to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/recovered.edits/445.seqid 2024-11-27T13:26:31,761 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,761 DEBUG [PEWorker-3 
{}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-27T13:26:31,762 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:26:31,762 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-27T13:26:31,764 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271fb7011baf9b45b3835db106212d4b70_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411271fb7011baf9b45b3835db106212d4b70_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,765 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272b05d20e669047d1ac47e3add5ba1e15_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272b05d20e669047d1ac47e3add5ba1e15_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,766 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272d48e3413c18486a8e583bb0db7c79b3_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411272d48e3413c18486a8e583bb0db7c79b3_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,767 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274d3d61c601264ce4944e299f09271137_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411274d3d61c601264ce4944e299f09271137_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,768 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275b96d3504e9b4df39df473a49d2ea6a5_a5bdef3839c25f6b6634d128aa14c12e to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275b96d3504e9b4df39df473a49d2ea6a5_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,769 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275fcdf9bcad2b44ff834a88c71557cb7a_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411275fcdf9bcad2b44ff834a88c71557cb7a_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,770 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127637d36b7c736496a81792beb54e62783_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127637d36b7c736496a81792beb54e62783_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,771 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276f2e660bc9234b5f910dfa6d299cfe64_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411276f2e660bc9234b5f910dfa6d299cfe64_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,772 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112785bf1d9e065047feb8fcc5f422cb7d39_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112785bf1d9e065047feb8fcc5f422cb7d39_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,773 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112788aa4c7a401a4377a58ac06acb3ee510_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112788aa4c7a401a4377a58ac06acb3ee510_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,774 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278aae2d41e8c94efd99fdd5d23f430cae_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411278aae2d41e8c94efd99fdd5d23f430cae_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,774 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279403a971ba0a461aa9e79083b9bb8cb7_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279403a971ba0a461aa9e79083b9bb8cb7_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,775 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279ed019e97534471bbea3efa8712d8602_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411279ed019e97534471bbea3efa8712d8602_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,776 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bb8907790563459d89d34984732a161d_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bb8907790563459d89d34984732a161d_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,777 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bc88bea5309d4b93843865a5de601f05_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127bc88bea5309d4b93843865a5de601f05_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,778 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c07b4f4fed644dee9cb2d3d6a25772fd_a5bdef3839c25f6b6634d128aa14c12e to 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c07b4f4fed644dee9cb2d3d6a25772fd_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,779 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c0f3b6bd08ef4f33a1b0f73f5aa675af_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127c0f3b6bd08ef4f33a1b0f73f5aa675af_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,780 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dd564ecedaa74a20999d04a293e157ab_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127dd564ecedaa74a20999d04a293e157ab_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,780 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0a460d9fd3440a5a2dced0291d9fb5e_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e0a460d9fd3440a5a2dced0291d9fb5e_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,781 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e29977dcca3145a29e80348583dad02c_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127e29977dcca3145a29e80348583dad02c_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,782 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ecb0ac0642734ca689e08de06a8bbaee_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127ecb0ac0642734ca689e08de06a8bbaee_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,783 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5afc0bbfa3f4f34a824f9b1733dce0e_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f5afc0bbfa3f4f34a824f9b1733dce0e_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,784 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f74a234dd8da480780fcf4bb40f53813_a5bdef3839c25f6b6634d128aa14c12e to hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241127f74a234dd8da480780fcf4bb40f53813_a5bdef3839c25f6b6634d128aa14c12e 2024-11-27T13:26:31,784 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-27T13:26:31,786 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=199, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,788 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-27T13:26:31,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-27T13:26:31,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=199, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-27T13:26:31,790 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732713991790"}]},"ts":"9223372036854775807"} 2024-11-27T13:26:31,792 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-27T13:26:31,792 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a5bdef3839c25f6b6634d128aa14c12e, NAME => 'TestAcidGuarantees,,1732713957844.a5bdef3839c25f6b6634d128aa14c12e.', STARTKEY => '', ENDKEY => ''}] 2024-11-27T13:26:31,792 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
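
Editor's note: every "Archived from ..." entry above follows the same mapping: a store file under <root>/data/... is re-created under <root>/archive/data/... with the rest of the path unchanged. The sketch below shows only that path arithmetic, using one file name from the log as an example; it is not the HFileArchiver implementation, and the helper name toArchivePath is invented here.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Maps <root>/data/<namespace>/<table>/<region>/<family>/<file>
    // to   <root>/archive/data/<namespace>/<table>/<region>/<family>/<file>.
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String dataPrefix = rootDir.toString() + "/data/";
        String file = storeFile.toString();
        if (!file.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("Not under " + dataPrefix + ": " + file);
        }
        return new Path(rootDir, "archive/data/" + file.substring(dataPrefix.length()));
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/a5bdef3839c25f6b6634d128aa14c12e/C/63b676d41900443dbe46d867055f3f89");
        // Prints the corresponding .../archive/data/... location, matching the log lines above.
        System.out.println(toArchivePath(root, storeFile));
    }
}
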
2024-11-27T13:26:31,792 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732713991792"}]},"ts":"9223372036854775807"} 2024-11-27T13:26:31,793 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-27T13:26:31,795 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=199, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-27T13:26:31,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-11-27T13:26:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=199 2024-11-27T13:26:31,846 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 199 completed 2024-11-27T13:26:31,855 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=236 (was 239), OpenFileDescriptor=448 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=370 (was 424), ProcessCount=11 (was 11), AvailableMemoryMB=4040 (was 4070) 2024-11-27T13:26:31,855 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-27T13:26:31,855 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-27T13:26:31,855 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:59011 2024-11-27T13:26:31,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:31,855 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-27T13:26:31,855 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=633883069, stopped=false 2024-11-27T13:26:31,856 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=a0541979a851,34317,1732713811966 2024-11-27T13:26:31,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-27T13:26:31,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-27T13:26:31,857 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-27T13:26:31,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:26:31,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:26:31,858 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:31,858 INFO [Time-limited test {}] 
regionserver.HRegionServer(2561): ***** STOPPING region server 'a0541979a851,32819,1732713812705' ***** 2024-11-27T13:26:31,858 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-27T13:26:31,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T13:26:31,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T13:26:31,858 INFO [RS:0;a0541979a851:32819 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-27T13:26:31,859 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(3579): Received CLOSE for 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1224): stopping server a0541979a851,32819,1732713812705 2024-11-27T13:26:31,859 DEBUG [RS:0;a0541979a851:32819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-27T13:26:31,859 INFO [RS:0;a0541979a851:32819 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-27T13:26:31,860 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3966f1275e7e6d5ced325aca1684d4b9, disabling compactions & flushes 2024-11-27T13:26:31,860 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. after waiting 0 ms 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 
2024-11-27T13:26:31,860 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3966f1275e7e6d5ced325aca1684d4b9 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-27T13:26:31,860 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-27T13:26:31,860 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1603): Online Regions={3966f1275e7e6d5ced325aca1684d4b9=hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9., 1588230740=hbase:meta,,1.1588230740} 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-27T13:26:31,860 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T13:26:31,860 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T13:26:31,860 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-27T13:26:31,860 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:26:31,865 INFO [regionserver/a0541979a851:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T13:26:31,876 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/.tmp/info/1d5790976d1e447ebefc50b7b2b39870 is 45, key is default/info:d/1732713817274/Put/seqid=0 2024-11-27T13:26:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742528_1704 (size=5037) 2024-11-27T13:26:31,881 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/info/650b573426df4d64827b44195c731730 is 143, key is hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9./info:regioninfo/1732713817154/Put/seqid=0 2024-11-27T13:26:31,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742529_1705 (size=7725) 2024-11-27T13:26:31,950 INFO [regionserver/a0541979a851:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T13:26:31,950 INFO [regionserver/a0541979a851:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T13:26:32,061 DEBUG [RS:0;a0541979a851:32819 {}] 
regionserver.HRegionServer(1629): Waiting on 1588230740, 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:26:32,261 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3966f1275e7e6d5ced325aca1684d4b9 2024-11-27T13:26:32,279 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/.tmp/info/1d5790976d1e447ebefc50b7b2b39870 2024-11-27T13:26:32,282 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/.tmp/info/1d5790976d1e447ebefc50b7b2b39870 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/info/1d5790976d1e447ebefc50b7b2b39870 2024-11-27T13:26:32,285 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/info/1d5790976d1e447ebefc50b7b2b39870, entries=2, sequenceid=6, filesize=4.9 K 2024-11-27T13:26:32,285 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/info/650b573426df4d64827b44195c731730 2024-11-27T13:26:32,285 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3966f1275e7e6d5ced325aca1684d4b9 in 425ms, sequenceid=6, compaction requested=false 2024-11-27T13:26:32,289 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/namespace/3966f1275e7e6d5ced325aca1684d4b9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-27T13:26:32,289 INFO [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 2024-11-27T13:26:32,289 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3966f1275e7e6d5ced325aca1684d4b9: 2024-11-27T13:26:32,289 DEBUG [RS_CLOSE_REGION-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732713816321.3966f1275e7e6d5ced325aca1684d4b9. 
2024-11-27T13:26:32,303 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/rep_barrier/2a53bf1700c44f12925dbb845ea46da9 is 102, key is TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257./rep_barrier:/1732713845681/DeleteFamily/seqid=0 2024-11-27T13:26:32,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742530_1706 (size=6025) 2024-11-27T13:26:32,459 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-27T13:26:32,460 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-27T13:26:32,461 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T13:26:32,661 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T13:26:32,707 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/rep_barrier/2a53bf1700c44f12925dbb845ea46da9 2024-11-27T13:26:32,724 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/table/7d497badbac1490dbe623fb0fa734cda is 96, key is TestAcidGuarantees,,1732713817478.2b5b15f41df6d1ae2583263f41ba6257./table:/1732713845681/DeleteFamily/seqid=0 2024-11-27T13:26:32,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742531_1707 (size=5942) 2024-11-27T13:26:32,862 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-27T13:26:32,862 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-27T13:26:32,862 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T13:26:33,062 DEBUG [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-27T13:26:33,128 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/table/7d497badbac1490dbe623fb0fa734cda 2024-11-27T13:26:33,131 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/info/650b573426df4d64827b44195c731730 as 
hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/info/650b573426df4d64827b44195c731730 2024-11-27T13:26:33,134 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/info/650b573426df4d64827b44195c731730, entries=22, sequenceid=93, filesize=7.5 K 2024-11-27T13:26:33,135 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/rep_barrier/2a53bf1700c44f12925dbb845ea46da9 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/rep_barrier/2a53bf1700c44f12925dbb845ea46da9 2024-11-27T13:26:33,137 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/rep_barrier/2a53bf1700c44f12925dbb845ea46da9, entries=6, sequenceid=93, filesize=5.9 K 2024-11-27T13:26:33,137 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/.tmp/table/7d497badbac1490dbe623fb0fa734cda as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/table/7d497badbac1490dbe623fb0fa734cda 2024-11-27T13:26:33,140 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/table/7d497badbac1490dbe623fb0fa734cda, entries=9, sequenceid=93, filesize=5.8 K 2024-11-27T13:26:33,141 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1281ms, sequenceid=93, compaction requested=false 2024-11-27T13:26:33,146 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-27T13:26:33,146 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T13:26:33,146 INFO [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-27T13:26:33,146 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-27T13:26:33,146 DEBUG [RS_CLOSE_META-regionserver/a0541979a851:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T13:26:33,262 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1250): stopping server 
a0541979a851,32819,1732713812705; all regions closed. 2024-11-27T13:26:33,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741834_1010 (size=26050) 2024-11-27T13:26:33,268 DEBUG [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/oldWALs 2024-11-27T13:26:33,268 INFO [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL a0541979a851%2C32819%2C1732713812705.meta:.meta(num 1732713816062) 2024-11-27T13:26:33,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741832_1008 (size=16223928) 2024-11-27T13:26:33,271 DEBUG [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/oldWALs 2024-11-27T13:26:33,271 INFO [RS:0;a0541979a851:32819 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL a0541979a851%2C32819%2C1732713812705:(num 1732713815117) 2024-11-27T13:26:33,271 DEBUG [RS:0;a0541979a851:32819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:33,271 INFO [RS:0;a0541979a851:32819 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T13:26:33,272 INFO [RS:0;a0541979a851:32819 {}] hbase.ChoreService(370): Chore service for: regionserver/a0541979a851:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-27T13:26:33,272 INFO [regionserver/a0541979a851:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-27T13:26:33,272 INFO [RS:0;a0541979a851:32819 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:32819 2024-11-27T13:26:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a0541979a851,32819,1732713812705 2024-11-27T13:26:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T13:26:33,278 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a0541979a851,32819,1732713812705] 2024-11-27T13:26:33,278 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing a0541979a851,32819,1732713812705; numProcessing=1 2024-11-27T13:26:33,279 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/a0541979a851,32819,1732713812705 already deleted, retry=false 2024-11-27T13:26:33,279 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; a0541979a851,32819,1732713812705 expired; onlineServers=0 2024-11-27T13:26:33,279 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'a0541979a851,34317,1732713811966' ***** 2024-11-27T13:26:33,279 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T13:26:33,279 DEBUG [M:0;a0541979a851:34317 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d66913, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a0541979a851/172.17.0.2:0 2024-11-27T13:26:33,279 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegionServer(1224): stopping server a0541979a851,34317,1732713811966 2024-11-27T13:26:33,279 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegionServer(1250): stopping server a0541979a851,34317,1732713811966; all regions closed. 2024-11-27T13:26:33,279 DEBUG [M:0;a0541979a851:34317 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T13:26:33,279 DEBUG [M:0;a0541979a851:34317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T13:26:33,280 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-27T13:26:33,280 DEBUG [M:0;a0541979a851:34317 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-27T13:26:33,280 DEBUG [master/a0541979a851:0:becomeActiveMaster-HFileCleaner.small.0-1732713814833 {}] cleaner.HFileCleaner(306): Exit Thread[master/a0541979a851:0:becomeActiveMaster-HFileCleaner.small.0-1732713814833,5,FailOnTimeoutGroup] 2024-11-27T13:26:33,280 DEBUG [master/a0541979a851:0:becomeActiveMaster-HFileCleaner.large.0-1732713814832 {}] cleaner.HFileCleaner(306): Exit Thread[master/a0541979a851:0:becomeActiveMaster-HFileCleaner.large.0-1732713814832,5,FailOnTimeoutGroup] 2024-11-27T13:26:33,280 INFO [M:0;a0541979a851:34317 {}] hbase.ChoreService(370): Chore service for: master/a0541979a851:0 had [] on shutdown 2024-11-27T13:26:33,280 DEBUG [M:0;a0541979a851:34317 {}] master.HMaster(1733): Stopping service threads 2024-11-27T13:26:33,280 INFO [M:0;a0541979a851:34317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-27T13:26:33,280 ERROR [M:0;a0541979a851:34317 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-27T13:26:33,281 INFO [M:0;a0541979a851:34317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-27T13:26:33,281 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-27T13:26:33,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-27T13:26:33,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T13:26:33,281 DEBUG [M:0;a0541979a851:34317 {}] zookeeper.ZKUtil(347): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-27T13:26:33,281 WARN [M:0;a0541979a851:34317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-27T13:26:33,282 INFO [M:0;a0541979a851:34317 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-27T13:26:33,282 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T13:26:33,282 INFO [M:0;a0541979a851:34317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-27T13:26:33,282 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T13:26:33,282 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:26:33,282 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:26:33,282 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T13:26:33,282 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T13:26:33,282 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=789.44 KB heapSize=970.93 KB 2024-11-27T13:26:33,297 DEBUG [M:0;a0541979a851:34317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8db0210a3ee4c6e81db0723c862c5a7 is 82, key is hbase:meta,,1/info:regioninfo/1732713816210/Put/seqid=0 2024-11-27T13:26:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742532_1708 (size=5672) 2024-11-27T13:26:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T13:26:33,378 INFO [RS:0;a0541979a851:32819 {}] regionserver.HRegionServer(1307): Exiting; stopping=a0541979a851,32819,1732713812705; zookeeper connection closed. 
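
Editor's note: the coordination steps logged here (the /hbase/running and /hbase/master znodes deleted, the region server's ephemeral node under /hbase/rs removed) are plain ZooKeeper operations under the /hbase base znode. A small sketch of inspecting that znode tree is below, assuming the quorum address 127.0.0.1:59011 reported in the log, which is reachable only while the mini cluster is running.

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class HBaseZnodeList {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; no-op watcher.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59011", 30_000, event -> { });
        try {
            List<String> children = zk.getChildren("/hbase", false);
            // While the cluster is healthy this typically includes master, rs, running, ...
            System.out.println("/hbase children: " + children);
        } finally {
            zk.close();
        }
    }
}
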
2024-11-27T13:26:33,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32819-0x100392645960001, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T13:26:33,378 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@461d9851 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@461d9851
2024-11-27T13:26:33,378 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-27T13:26:33,701 INFO [M:0;a0541979a851:34317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2223 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8db0210a3ee4c6e81db0723c862c5a7
2024-11-27T13:26:33,728 DEBUG [M:0;a0541979a851:34317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cce9f887835240fb87ae86e220e01148 is 2284, key is \x00\x00\x00\x00\x00\x00\x00k/proc:d/1732713904130/Put/seqid=0
2024-11-27T13:26:33,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742533_1709 (size=48631)
2024-11-27T13:26:34,131 INFO [M:0;a0541979a851:34317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=788.89 KB at sequenceid=2223 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cce9f887835240fb87ae86e220e01148
2024-11-27T13:26:34,134 INFO [M:0;a0541979a851:34317 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cce9f887835240fb87ae86e220e01148
2024-11-27T13:26:34,152 DEBUG [M:0;a0541979a851:34317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3af86dea6b16489e931395c873f30c4c is 69, key is a0541979a851,32819,1732713812705/rs:state/1732713814865/Put/seqid=0
2024-11-27T13:26:34,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073742534_1710 (size=5156)
2024-11-27T13:26:34,556 INFO [M:0;a0541979a851:34317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2223 (bloomFilter=true), to=hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3af86dea6b16489e931395c873f30c4c
2024-11-27T13:26:34,560 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d8db0210a3ee4c6e81db0723c862c5a7 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8db0210a3ee4c6e81db0723c862c5a7
2024-11-27T13:26:34,563 INFO [M:0;a0541979a851:34317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d8db0210a3ee4c6e81db0723c862c5a7, entries=8, sequenceid=2223, filesize=5.5 K
2024-11-27T13:26:34,563 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cce9f887835240fb87ae86e220e01148 as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cce9f887835240fb87ae86e220e01148
2024-11-27T13:26:34,566 INFO [M:0;a0541979a851:34317 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cce9f887835240fb87ae86e220e01148
2024-11-27T13:26:34,566 INFO [M:0;a0541979a851:34317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cce9f887835240fb87ae86e220e01148, entries=199, sequenceid=2223, filesize=47.5 K
2024-11-27T13:26:34,567 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3af86dea6b16489e931395c873f30c4c as hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3af86dea6b16489e931395c873f30c4c
2024-11-27T13:26:34,570 INFO [M:0;a0541979a851:34317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42217/user/jenkins/test-data/f1845e35-a380-aa2c-409c-0771c0d383ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3af86dea6b16489e931395c873f30c4c, entries=1, sequenceid=2223, filesize=5.0 K
2024-11-27T13:26:34,571 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegion(3040): Finished flush of dataSize ~789.44 KB/808390, heapSize ~970.63 KB/993928, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1289ms, sequenceid=2223, compaction requested=false
2024-11-27T13:26:34,573 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T13:26:34,573 DEBUG [M:0;a0541979a851:34317 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-27T13:26:34,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45727 is added to blk_1073741830_1006 (size=954204)
2024-11-27T13:26:34,575 INFO [M:0;a0541979a851:34317 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-27T13:26:34,575 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-27T13:26:34,575 INFO [M:0;a0541979a851:34317 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34317
2024-11-27T13:26:34,578 DEBUG [M:0;a0541979a851:34317 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/a0541979a851,34317,1732713811966 already deleted, retry=false
2024-11-27T13:26:34,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T13:26:34,679 INFO [M:0;a0541979a851:34317 {}] regionserver.HRegionServer(1307): Exiting; stopping=a0541979a851,34317,1732713811966; zookeeper connection closed.
2024-11-27T13:26:34,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34317-0x100392645960000, quorum=127.0.0.1:59011, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T13:26:34,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T13:26:34,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T13:26:34,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T13:26:34,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T13:26:34,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.log.dir/,STOPPED}
2024-11-27T13:26:34,691 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T13:26:34,691 WARN [BP-117857532-172.17.0.2-1732713809119 heartbeating to localhost/127.0.0.1:42217 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T13:26:34,691 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T13:26:34,691 WARN [BP-117857532-172.17.0.2-1732713809119 heartbeating to localhost/127.0.0.1:42217 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-117857532-172.17.0.2-1732713809119 (Datanode Uuid 8633b0dd-898d-41a0-8b1f-aeedc2e4d162) service to localhost/127.0.0.1:42217
2024-11-27T13:26:34,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data1/current/BP-117857532-172.17.0.2-1732713809119 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T13:26:34,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/cluster_934dc756-dc3d-856c-d3d2-4c44f2f2bc8b/dfs/data/data2/current/BP-117857532-172.17.0.2-1732713809119 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T13:26:34,695 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T13:26:34,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T13:26:34,704 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T13:26:34,704 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T13:26:34,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T13:26:34,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/1b50eb87-94e4-ebf3-2700-cb80df792d2a/hadoop.log.dir/,STOPPED}
2024-11-27T13:26:34,722 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-27T13:26:34,865 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down